[
  {
    "path": ".clang-format",
    "content": "---\n# Used for all options not set in this file\nBasedOnStyle:  LLVM\nAllowAllParametersOfDeclarationOnNextLine: false\nBinPackArguments: false\nBinPackParameters: false\nBreakConstructorInitializersBeforeComma: true\nAlwaysBreakTemplateDeclarations: Yes\nIndentWidth: 4\n"
  },
  {
    "path": ".gitattributes",
    "content": "*.git export-ignore\n*.github export-ignore\n/ci export-ignore\n/docker export-ignore\n\n*.DS_Store export-ignore\n*.gitattributes export-ignore\n/*.clang-format export-ignore\n*.gitignore export-ignore\n/_config.yml export-ignore\n/bors.toml export-ignore\n*.gitmodules export-ignore\n/.gitlab-ci.yml export-ignore\n"
  },
  {
    "path": ".github/tag-issue.md",
    "content": "---\ntitle: cmake project version {{ env.CMAKE_VERSION }} does not match git tag {{ env.GIT_VERSION }}\nlabels: bug\n---\nThe cmake version should be in sync with the git version to ensure the correct file names and sonames of shared libraries."
  },
  {
    "path": ".github/workflows/version_checker.yml",
    "content": "name: VersionChecker\n\non:\n  push:\n    tags:\n      - 'v*'\n\njobs:\n  checker:\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n          fetch-depth: 0\n    - name: Version check\n      id: check\n      run: |\n           mkdir build \n           cd build \n           cmake .. || true\n           CMAKE_VERSION=\"v$(cat CMakeCache.txt | grep '^CMAKE_PROJECT_VERSION\\b' | cut -d \"=\" -f2)\"\n           GIT_VERSION=$(git describe --tags)\n           if [ \"$CMAKE_VERSION\" != \"$GIT_VERSION\" ]; then\n             echo ::set-output name=CMAKE_ISSUE::yes\n             echo ::set-output name=CMAKE_VERSION::$CMAKE_VERSION\n             echo ::set-output name=GIT_VERSION::$GIT_VERSION\n           fi\n    - uses: JasonEtco/create-an-issue@v2.4.0\n      if: steps.check.outputs.CMAKE_ISSUE == 'yes'\n      env:\n        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        CMAKE_VERSION: ${{ steps.check.outputs.CMAKE_VERSION }}\n        GIT_VERSION: ${{ steps.check.outputs.GIT_VERSION }}\n      with:\n        filename: .github/tag-issue.md\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store*\n*.swp\n__pycache__\nbuild\nexports\ndoc\n.idea*\nCMakeLists.txt.user\n.vscode*\n"
  },
  {
    "path": ".gitlab-ci.yml",
    "content": "include:\n  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.cscs.yml'\n\nstages:\n  - build\n  - test\n\n##\n## BUILDS\n##\n.build_common:\n  extends: .dind\n  stage: build\n  only: ['master', 'staging', 'trying']\n  variables:\n    GIT_SUBMODULE_STRATEGY: recursive\n  before_script:\n    - docker login -u $CSCS_REGISTRY_USER -p $CSCS_REGISTRY_PASSWORD $CSCS_REGISTRY\n  script:\n    - docker build --network=host --cache-from $BUILD_IMAGE --build-arg BUILDKIT_INLINE_CACHE=1 -t $BUILD_IMAGE -f $BUILD_DOCKERFILE .\n    - docker push $BUILD_IMAGE\n    - docker build -t $DEPLOY_IMAGE --network=host --build-arg BUILDKIT_INLINE_CACHE=1 --build-arg BUILD_ENV=$BUILD_IMAGE -f $DEPLOY_DOCKERFILE .\n    - docker push $DEPLOY_IMAGE\n\n# Builds a Docker image for the current commit, cpu / gpu\nbuild sanitizer cpu:\n  extends: .build_common\n  variables:\n    BUILD_DOCKERFILE: docker/asan/build-env.Dockerfile\n    BUILD_IMAGE: $CSCS_REGISTRY_IMAGE/build-env-asan:latest\n    DEPLOY_DOCKERFILE: docker/asan/deploy.Dockerfile\n    DEPLOY_IMAGE: $CSCS_REGISTRY_IMAGE/deploy-cpu-asan:$CI_COMMIT_SHA\n\nbuild cpu:\n  extends: .build_common\n  variables:\n    BUILD_DOCKERFILE: docker/cpu-release/build-env.Dockerfile\n    BUILD_IMAGE: $CSCS_REGISTRY_IMAGE/build-env-cpu:latest\n    DEPLOY_DOCKERFILE: docker/cpu-release/deploy.Dockerfile\n    DEPLOY_IMAGE: $CSCS_REGISTRY_IMAGE/deploy-cpu:$CI_COMMIT_SHA\n\n\nbuild gpu:\n  extends: .build_common\n  variables:\n    BUILD_DOCKERFILE: docker/gpu/build-env.Dockerfile\n    BUILD_IMAGE: $CSCS_REGISTRY_IMAGE/build-env-gpu:latest\n    DEPLOY_DOCKERFILE: docker/gpu/deploy.Dockerfile\n    DEPLOY_IMAGE: $CSCS_REGISTRY_IMAGE/deploy-gpu:$CI_COMMIT_SHA\n\nsanitize:\n  stage: test\n  only: ['master', 'staging', 'trying']\n  trigger:\n    strategy: depend\n    include: /ci/sanitize.yml\n\ncpu test:\n  stage: test\n  only: ['master', 'staging', 'trying']\n  trigger:\n    strategy: depend\n    include: /ci/cpu.yml\n\ngpu 
test:\n  stage: test\n  only: ['master', 'staging', 'trying']\n  trigger:\n    strategy: depend\n    include: /ci/gpu.yml\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"libs/Tiled-MM\"]\n\tpath = libs/Tiled-MM\n\turl = https://github.com/eth-cscs/Tiled-MM.git\n[submodule \"libs/COSTA\"]\n\tpath = libs/COSTA\n\turl = https://github.com/eth-cscs/COSTA\n[submodule \"libs/cxxopts\"]\n\tpath = libs/cxxopts\n\turl = https://github.com/jarro2783/cxxopts\n"
  },
  {
    "path": "ATTRIBUTIONS.md",
    "content": "# COSMA Attributions:\n\nCOSMA uses the following external projects:\n- [COSTA](https://github.com/eth-cscs/COSTA): used for transforming between COSMA and SCALAPACK matrix data layouts and for transposing distributed matrices. Licensed under the [BSD-3-Clause License](https://github.com/eth-cscs/COSTA/blob/master/LICENSE).\n- [Tiled-MM](https://github.com/eth-cscs/Tiled-MM): used for performing `dgemm` calls with the GPU-backend. Licensed under the [BSD-3-Clause License](https://github.com/eth-cscs/Tiled-MM/blob/master/LICENSE).\n- [semiprof](https://github.com/bcumming/semiprof): used for profiling the code. Licensed under the [BSD-3-Clause License](https://github.com/bcumming/semiprof/blob/master/LICENSE).\n- [options](https://github.com/kabicm/options): used for parsing the command line options. Licensed under the [BSD-3-Clause License](https://github.com/kabicm/options/blob/master/LICENCE).\n- [cxxopts](https://github.com/jarro2783/cxxopts): user for parsing the command line options. Licensed under the [MIT License](https://github.com/jarro2783/cxxopts/blob/master/LICENSE).\n- [googletest](https://github.com/google/googletest): used for unit testing. Licensed under the [BSD-3-Clause License](https://github.com/google/googletest/blob/master/LICENSE).\n- [gtest_mpi](https://github.com/AdhocMan/gtest_mpi): used as a plugin for googletest adding the MPI support. Licensed under the [BSD-3-Clause License](https://github.com/AdhocMan/gtest_mpi/blob/master/LICENSE).\n- [interpose](https://github.com/ccurtsinger/interpose): used for dispatching some of the pxgemm calls to SCALAPACK. Licensed under the [MIT License](https://github.com/ccurtsinger/interpose/blob/master/COPYING.md).\n\nMost of these projects are added as submodules and can be found in the `libs` folder.\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.24 FATAL_ERROR)\n\nproject(cosma\n  DESCRIPTION \"Communication Optimal Matrix Multiplication\"\n  HOMEPAGE_URL \"https://github.com/eth-cscs/COSMA\"\n  VERSION 2.8.4\n  LANGUAGES CXX)\n\ninclude(FetchContent)\n\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake\")\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules\")\ninclude(cmake/build_type.cmake)\ninclude(cmake/adjust_mpiexec_flags.cmake)\nset(CMAKE_EXPORT_COMPILE_COMMANDS \"YES\") # always write compile_commands.json\n\n# Options\n#\n\nset(COSMA_GPU_BACKENDS_LIST \"CUDA\" \"ROCM\")\nset(COSMA_SCALAPACK_LIST \"OFF\" \"MKL\" \"CRAY_LIBSCI\" \"NVPL\" \"CUSTOM\")\nset(COSMA_BLAS_LIST   \"auto\" \"MKL\" \"OPENBLAS\" \"CRAY_LIBSCI\" \"NVPL\" \"CUSTOM\" \"BLIS\" \"ATLAS\" \"CUDA\" \"ROCM\" \"OFF\")\noption(COSMA_WITH_TESTS \"Generate the test target.\" ON)\noption(COSMA_WITH_APPS \"Generate the miniapp targets.\" ON)\noption(COSMA_WITH_BENCHMARKS \"Generate the benchmark targets.\" ON)\noption(COSMA_WITH_PROFILING \"Enable profiling.\" OFF)\noption(COSMA_WITH_NCCL \"Use NCCL as communication backend.\" OFF)\noption(COSMA_WITH_RCCL \"Use RCCL as communication backend.\" OFF)\noption(COSMA_WITH_GPU_AWARE_MPI \"Use gpu-aware MPI for communication.\" OFF)\noption(COSMA_USE_UNIFIED_MEMORY \"Use unified memory when GPU acceleration is ON\" OFF)\noption(BUILD_SHARED_LIBS \"Build shared libraries.\" OFF)\nset(COSMA_SCALAPACK \"OFF\" CACHE STRING \"scalapack implementation. Can be MKL, CRAY_LIBSCI, NVPL, CUSTOM or OFF.\")\nset(COSMA_BLAS \"OFF\" CACHE STRING \"Blas library for computations on host or GPU\")\n\nset(COSMA_BLAS_VENDOR \"OFF\")\nset(COSMA_GPU_BACKEND \"OFF\")\n\nset_property(CACHE COSMA_SCALAPACK PROPERTY STRINGS ${COSMA_SCALAPACK_LIST})\nset_property(CACHE COSMA_BLAS PROPERTY STRINGS ${COSMA_BLAS_LIST})\n\n# we keep the old cosma behavior of indicating GPU support as a blas\n# implementation. 
We have to sort out what we should find for the FindBLAS and\n# GPU supports since they are treated as separate components\n\nif(COSMA_BLAS STREQUAL \"OFF\")\n    message(FATAL_ERROR \"A Blas implementation is needed when running on CPU only: choices are : auto, MKL, OPENBLAS, CRAY_LIBSCI, CUSTOM, BLIS, ATLAS, FLEXIBLAS, ARMPL, GenericBLAS, CUDA or ROCM\")\nendif()\n\nif (COSMA_BLAS MATCHES \"CUDA|ROCM\")\n  set(COSMA_GPU_BACKEND ${COSMA_BLAS})\n  set(COSMA_BLAS_VENDOR \"OFF\")\nelse()\n  set(COSMA_BLAS_VENDOR ${COSMA_BLAS})\n  set(COSMA_GPU_BACKEND \"OFF\")\nendif()\n\nif ((COSMA_WITH_NCCL OR COSMA_WITH_RCCL) AND NOT COSMA_GPU_BACKEND IN_LIST COSMA_GPU_BACKENDS_LIST)\n  message(FATAL_ERROR \"NCCL (RCCL) can only be used with the GPU backend set to CUDA (ROCM).\")\nendif()\n\nif (COSMA_WITH_GPU_AWARE_MPI AND NOT COSMA_GPU_BACKEND IN_LIST COSMA_GPU_BACKENDS_LIST)\n  message(FATAL_ERROR \"GPU-aware MPI can only be used with the GPU backend set to CUDA or ROCM.\")\nendif()\n\nif(NOT CMAKE_BUILD_TYPE)\n  set(CMAKE_BUILD_TYPE \"Release\")\nendif()\n\n# Dependencies\n# MPI\nset(MPI_DETERMINE_LIBRARY_VERSION TRUE)\nfind_package(MPI COMPONENTS CXX REQUIRED)\nadjust_mpiexec_flags()\n\n# check if scalapack backend is valid\nmessage(STATUS \"Selected SCALAPACK backend for COSMA: ${COSMA_SCALAPACK}\")\n\nif(NOT COSMA_SCALAPACK IN_LIST COSMA_SCALAPACK_LIST)\n  message(FATAL_ERROR \"Invalid value for COSMA_SCALAPACK!\")\nendif()\n\n# the blas targets are only defined when COSMA_SCALAPACK is ON whatever value of COSMA_GPU_BACKEND\nif (NOT COSMA_SCALAPACK MATCHES \"OFF\")\n  if (COSMA_SCALAPACK MATCHES \"MKL\" OR COSMA_SCALAPACK MATCHES \"CRAY_LIBSCI\" OR COSMA_SCALAPACK MATCHES \"NVPL\")\n    set(COSMA_BLAS_VENDOR ${COSMA_SCALAPACK})\n  else()\n    set(COSMA_BLAS_VENDOR \"auto\")\n  endif()\nendif()\n\n\nif (NOT COSMA_BLAS_VENDOR MATCHES \"OFF|CUDA|ROCM\")\n  find_package(Blas REQUIRED)\nendif()\n\nif (NOT COSMA_SCALAPACK MATCHES \"OFF\")\n  find_package(SCALAPACK 
REQUIRED)\nendif ()\n\nset(COSTA_WITH_PROFILING ${COSMA_WITH_PROFILING} CACHE INTERNAL \"\")\nset(COSTA_SCALAPACK ${COSMA_SCALAPACK} CACHE INTERNAL \"\")\n\nFetchContent_Declare(\n  costa\n  GIT_REPOSITORY https://github.com/eth-cscs/costa.git\n  GIT_TAG        2484769535772f807d402901ffca63bb6678dd42 # v2.3.0\n  FIND_PACKAGE_ARGS NAMES costa\n)\n\n# the joy of fetch_content. if we build costa and cosma together\n# fetch_content will pick up the FindSCALAPACK from cosma NOT costa.\nif (NOT TARGET costa::scalapack::scalapack AND NOT COSMA_SCALAPACK MATCHES \"OFF\")\n  add_library(costa::scalapack::scalapack ALIAS cosma::scalapack::scalapack)\nendif ()\n\nFetchContent_MakeAvailable(costa)\n\n# these are only GPU-backends\nif (COSMA_GPU_BACKEND MATCHES \"CUDA|ROCM\")\n  set(TILEDMM_GPU_BACKEND ${COSMA_GPU_BACKEND} CACHE INTERNAL \"\")\n  FetchContent_Declare(\n    Tiled-MM\n    GIT_REPOSITORY https://github.com/eth-cscs/Tiled-MM.git\n    GIT_TAG      0eb75179e670a04c649b50ae5e91bb71b43e4d06 # v2.3.2\n    FIND_PACKAGE_ARGS NAMES tiled-MM\n  )\n  FetchContent_MakeAvailable(Tiled-MM)\n\n  if (COSMA_WITH_NCCL)\n    find_package(CUDAToolkit REQUIRED)\n    find_package(NCCL REQUIRED)\n  elseif (COSMA_WITH_RCCL)\n    find_package(hip REQUIRED)\n    find_package(rccl REQUIRED)\n  endif()\n\n  if (NOT TARGET Tiled-MM::Tiled-MM)\n      message(\"Tiled-mm target not found\")\n  endif ()\n\nendif()\n\nif (COSMA_WITH_PROFILING)\n  FetchContent_Declare(\n    semiprof\n    GIT_REPOSITORY  https://github.com/bcumming/semiprof.git\n    GIT_TAG         f132142ff2215dfa073e416fa7911d8877d62752\n    FIND_PACKAGE_ARGS NAMES semiprof\n  )\n  FetchContent_MakeAvailable(semiprof)\nendif ()\n\nif (COSMA_WITH_TESTS OR COSMA_WITH_APPS)\n  FetchContent_Declare(\n    cxxopts\n    GIT_REPOSITORY https://github.com/jarro2783/cxxopts.git\n    GIT_TAG        4bf61f08697b110d9e3991864650a405b3dd515d # v3.2.1\n    FIND_PACKAGE_ARGS NAMES cxxopts\n  )\n  
FetchContent_MakeAvailable(cxxopts)\nendif()\n\nif (NOT TARGET Tiled-MM::Tiled-MM)\n      message(\"Tiled-mm target not found\")\nendif ()\n# preserve rpaths when installing and make the install folder relocatable\n# use `CMAKE_SKIP_INSTALL_RPATH` to skip this\n# https://spack.readthedocs.io/en/latest/workflows.html#write-the-cmake-build\nlist(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES\n  \"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}\" isSystemDir)\n# skip RPATH if COSMA is installed to system directories\nif(isSystemDir STREQUAL \"-1\")\n  set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)\n  if(APPLE)\n    set(basePoint @loader_path)\n  else()\n    set(basePoint $ORIGIN)\n  endif()\n  file(RELATIVE_PATH relDir ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}\n    ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR})\n  set(CMAKE_INSTALL_RPATH ${basePoint} ${basePoint}/${relDir})\nendif()\n\n# COSMA\n#\ninclude(CMakePackageConfigHelpers)\ninclude(GNUInstallDirs)\n\nadd_subdirectory(src/cosma)\n\ninstall(DIRECTORY \"${cosma_SOURCE_DIR}/src/cosma\"\n  DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}\"\n  FILES_MATCHING\n  PATTERN \"*.hpp\")\n\nwrite_basic_package_version_file(\n  \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n  VERSION ${cosma_VERSION}\n  COMPATIBILITY SameMajorVersion)\n\nconfigure_file(\"${cosma_SOURCE_DIR}/cmake/cosma.pc.in\"\n  \"${cosma_BINARY_DIR}/cosma.pc\"\n  @ONLY)\n\nconfigure_file(\"${cosma_SOURCE_DIR}/cmake/cosmaConfig.cmake.in\"\n  \"${cosma_BINARY_DIR}/cosmaConfig.cmake\"\n  @ONLY)\n\nwrite_basic_package_version_file(\n  \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n  VERSION \"${cosma_VERSION}\"\n  COMPATIBILITY SameMajorVersion)\n\ninstall(FILES \"${cosma_BINARY_DIR}/cosmaConfig.cmake\"\n  \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n  \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindMKL.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindNVPL.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindBlas.cmake\"\n  
\"${cosma_SOURCE_DIR}/cmake/FindSCALAPACK.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindOPENBLAS.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindFLEXIBLAS.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindARMPL.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindATLAS.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindCRAY_LIBSCI.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindGenericBLAS.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindNCCL.cmake\"\n  \"${cosma_SOURCE_DIR}/cmake/FindBLIS.cmake\"\n  DESTINATION \"${CMAKE_INSTALL_LIBDIR}/cmake/cosma\")\n\ninstall(FILES \"${cosma_BINARY_DIR}/cosma.pc\"\n  DESTINATION \"${CMAKE_INSTALL_LIBDIR}/pkgconfig\")\n\nif(COSMA_WITH_TESTS)\n  add_subdirectory(libs/gtest_mpi)\n  enable_testing()\n  add_subdirectory(tests)\nendif()\n\nif(COSMA_WITH_APPS)\n  add_subdirectory(miniapp)\nendif()\n\nif(COSMA_WITH_BENCHMARKS AND NOT COSMA_BLAS MATCHES \"OPENBLAS\")\n  add_subdirectory(benchmarks)\nendif()\n\n"
  },
  {
    "path": "INSTALL.md",
    "content": "## Building COSMA\n\nTo build COSMA, do the following steps:\n```bash\n# clone the repository\ngit clone --recursive https://github.com/eth-cscs/COSMA.git\ncd COSMA\n\n# create a build directory within COSMA\nmkdir build\ncd build\n\n# set up the compiler, e.g. with:\nexport CC=`which cc`\nexport CXX=`which CC`\n\n# Choose which BLAS and SCALAPACK backends to use (e.g. MKL)\ncmake -DCOSMA_BLAS=MKL -DCOSMA_SCALAPACK=MKL ..\n\n# compile\nmake -j 8\n```\n> !! Note the *--recursive* flag !!\n\nOther important options that can be passed to `cmake` are the following:\n- `COSMA_BLAS:` `MKL` (default), `OPENBLAS`, `CRAY_LIBSCI`, `CUSTOM`, `CUDA` or `ROCM`. Determines which backend will be used for the local matrix multiplication calls.\n- `COSMA_SCALAPACK:` OFF (default), `MKL`, `CRAY_LIBSCI`, `CUSTOM`. If specified, `COSMA` will also provide ScaLAPACK wrappers, thus offering `pdgemm`, `psgemm`, `pzgemm` and `pcgemm` functions, which completely match the ScaLAPACK API.\n\n## Building COSMA on Multi-GPU Systems\n\nCOSMA can take advantage of fast GPU-to-GPU interconnects like NV-Links, through the use of the following:\n- NCCL library (for NVIDIA GPUs), i.e. RCCL library (for AMD GPUs): when `-DCOSMA_WITH_NCCL=ON`, i.e. `-DCOSMA_WITH_RCCL=ON` is specified in `cmake`, all the collective communication is performed through these libraries, which can utilize fast gpu-to-gpu interconnects.\n- GPU-aware MPI: when `-DCOSMA_WITH_GPU_AWARE_MPI=ON` is specified in `cmake`, cuda-aware MPI for NVIDIA GPUs (i.e. rocm-aware MPI for AMD GPUs) will be used for collective communication. The user must make sure that the gpu-aware MPI is enabled. 
For example, on Cray-systems, this can be done by setting the following environment variables: \n    - `export MPICH_RDMA_ENABLED_CUDA=1`\n    - `export MPICH_GPU_SUPPORT_ENABLED=1`\n\n## Building COSMA on Cray Systems\n\nThere are already prepared scripts for loading the necessary dependencies for COSMA on Cray-Systems:\n- `Cray XC40` (CPU-only version): `source ./scripts/piz_daint_cpu.sh` loads `MKL` and other necessary modules.\n- `Cray XC50` (Hybrid version): `source ./scripts/piz_daint_gpu.sh` loads `cublas` and other necessary modules.\n\nAfter the right modules are loaded, the instructions from the beginning of this file can be followed.\n\n## Installing COSMA\n\nTo install do `make install`.\n\n> !! Note: To set custom installation directory use `CMAKE_INSTALL_PREFIX` when building.\n\nCOSMA is CMake friendly and provides a cosmaConfig.cmake module for easy\nintegration into 3rd-party CMake projects with\n\n```\nfind_package(cosma REQUIRED)\ntarget_link_libraries( ... cosma::cosma)\n```\n\nCOSMA's dependencies are taken care of internally, nothing else needs to be\nlinked. Make sure to set `CMAKE_INSTALL_PREFIX` to COSMA's installation directory\nwhen building.\n\nThere is a rudimentary pkgconfig support; dependencies are handled explicitly by\nconsumers.\n\n# Installing COSMA with Spack\n\n- with OpenBLAS back end: `spack install cosma`\n- with MKL back end:      `spack install cosma ^mkl`\n- with GPU back end:      `spack install cosma +cuda`\n- with Netlib LAPACK:     `spack install cosma ^netlib-lapack`\n- with MKL ScaLAPACK:     `spack install cosma +scalapack ^mkl`\n\nNotes:\n- By default Spack builds in release mode with debug information included (-O2\n  -g). 
To build with -O3, add `build_type=Release` to the command line.\n- By default Spack selects openmpi as the MPI implementation, to select MPICH,\n  add `^mpich`\n\nFor more information on Spack: [Spack 101 Tutorial](https://spack.readthedocs.io/en/latest/tutorial.html).\n\n## Docker\n\nCOSMA can be installed into a Docker container in the following way:\n\n```\ndocker build -f docker/gpu/build-env.Dockerfile -t cosma-build-env .\ndocker build --build-arg BUILD_ENV=cosma-build-env -f docker/gpu/deploy.Dockerfile -t cosma .\n```\n\nThen the `cosma` container can be deployed for testing:\n\n```\ndocker run --rm -it -v (pwd):(pwd) --gpus all cosma\n```\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, ETH Zürich.\nCopyright (c) 2021, Advanced Micro Devices, Inc.\nCopyright (c) 2018-2022, ETH Zürich.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n   may be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "[![pipeline status](https://gitlab.com/cscs-ci/eth-cscs/COSMA/badges/master/pipeline.svg)](https://gitlab.com/cscs-ci/eth-cscs/COSMA/-/commits/master)\n\n<p align=\"center\"><img src=\"./docs/cosma-logo.svg\" width=\"70%\"></p>\n\n## Table of Contents\n- [Overview](#overview)\n- [COSMA Literature](#cosma-literature)\n- [Features](#features)\n- [Building COSMA](#building-cosma)\n- [COSMA Dependencies](#cosma-dependencies)\n- [Using COSMA](#using-cosma)\n    - [30 seconds Tutorial](#using-cosma-in-30-seconds)\n- [COSMA on Multi-GPU Systems](#cosma-on-multi-gpu-systems)\n    - [Using NCCL/RCCL Libraries](using-ncclrccl-libraries)\n    - [Using GPU-aware MPI](#using-gpu-aware-mpi)\n- [COSMA in production](#cosma-in-production)\n    - [CP2K](#cp2k)\n    - [Julia language](#julia-language)\n- [Examples - Miniapps](#miniapps)\n    - [Matrix Multiplication with COSMA](#matrix-multiplication)\n    - [COSMA pxgemm wrapper](#cosma-pxgemm-wrapper)\n- [Tunable Parameters](#tunable-parameters)\n    - [Parameters Overview](#parameters-overview)\n    - [Controlling GPU memory](#controlling-gpu-memory)\n    - [Controlling CPU memory](#controlling-cpu-memory)\n- [Performance Profiling](#profiling)\n- [Authors](#authors)\n- [Questions?](#questions)\n- [Acknowledgements](#acknowledgements)\n\n## Overview\n\nCOSMA is a parallel, high-performance, GPU-accelerated, matrix-matrix multiplication algorithm that is communication-optimal for all combinations of matrix dimensions, number of processors and memory sizes, without the need for any parameter tuning. The key idea behind COSMA is to first derive a tight optimal sequential schedule and only then parallelize it, preserving I/O optimality between processes. This stands in contrast with the 2D and 3D algorithms, which fix process domain decomposition upfront and then map it to the matrix dimensions, which may result in asymptotically more communication. 
The final design of COSMA facilitates the overlap of computation and communication, ensuring speedups and applicability of modern mechanisms such as RDMA. COSMA allows to not utilize some processors in order to optimize the processor grid, which reduces the communication volume even further and increases the computation volume per processor.\n\nCOSMA got the **Best Student Paper Award** at the prestigious **Supercomputing 2019** conference in Denver, US.\n\nCOSMA alleviates the issues of current state-of-the-art algorithms, which can be summarized as follows:\n\n- `2D (SUMMA)`: Requires manual tuning and not communication-optimal in the presence of extra memory.\n- `2.5D`: Optimal for `m=n`, but inefficient for `m << n` or `n << m` and for some numbers of processes `p`.\n- `Recursive (CARMA)`: Asymptotically communication-optimal for all `m, n, k, p`, but splitting always the largest dimension might lead up to `√3` increase in communication volume.\n- `COSMA (this work)`: Strictly communication-optimal (not just asymptotically) for all `m, n, k, p` and memory sizes that yields speedups by a factor of up to 8.3x over the second-fastest algorithm.\n\nIn addition to being communication-optimal, this implementation is highly-optimized to reduce the memory footprint in the following sense:\n- `Buffer Reuse`: all the buffers are pre-allocated and carefully reused during execution, including the buffers necessary for the communication, which reduces the total memory usage.\n- `Reduced Local Data Movement`: the assignment of data blocks to processes is fully adapted to communication pattern, which minimizes the need of local data reshuffling that arise after each communication step.\n\nThe library supports both one-sided and two-sided MPI communication backends. 
It uses `dgemm` for the local computations, but also has support for `GPU` acceleration through our `Tiled-MM` library using `cublas` or `rocBLAS`.\n\n## COSMA Literature\n\nThe paper and other materials on COSMA are available under the following link:\n- **ACM Digital Library (Best Student Paper Award at SC19):** https://dl.acm.org/doi/10.1145/3295500.3356181\n- **Arxiv:** https://arxiv.org/abs/1908.09606\n- **YouTube Presentation:** https://www.youtube.com/watch?v=5wiZWw5ltR0\n- **Press Release:** https://www.cscs.ch/science/computer-science-hpc/2019/new-matrix-multiplication-algorithm-pushes-the-performance-to-the-limits/\n\n## Features\n\n- **[NEW] Multi-GPU Systems Support:** COSMA is now able to take advantage of fast GPU-to-GPU interconnects either through the use of NCCL/RCCL libraries or by using the GPU-aware MPI. Both NVIDIA and AMD GPUs are supported.\n- **ScaLAPACK API Support:** it is enough to link to COSMA, without changing the code and all `p?gemm` calls will use ScaLAPACK wrappers provided by COSMA.\n- **C/Fortran Interface:** written in `C++`, but provides `C` and `Fortran` interfaces.\n- **Custom Types:** fully templatized types.\n- **GPU acceleration:** supports both **NVIDIA** and **AMD** GPUs.\n- **Supported BLAS (CPU) backends:** MKL, LibSci, NETLIB, BLIS, ATLAS.\n- **Custom Data Layout Support:** natively uses its own blocked data layout of matrices, but supports arbitrary grid-like data layout of matrices.\n- **Transposition/Conjugation Support:** matrices `A` and `B` can be transposed and/or conjugated.\n- **Communication and Computation Overlap:** supports overlapping of communication and computation.\n- **Spack Installation:** can be built and installed with `Spack` since v14.1\n- **Julia Package:** see https://github.com/haampie/COSMA.jl/ on how to use COSMA in the Julia language.\n\n## Building COSMA\n\nSee [Installation Instructions](INSTALL.md).\n\n## COSMA Dependencies\n\nCOSMA is a CMake project and requires a recent 
CMake (>=3.24).\n\nExternal dependencies:\n\n- `MPI 3`: (required)\n- `BLAS`: when the problem becomes local, COSMA uses provided `?gemm` backend, which can be one of the following:\n     - `MKL`\n     - `OPENBLAS`\n     - `BLIS`\n     - `ATLAS`\n     - `CRAY_LIBSCI`: `Cray-libsci` or `Cray-libsci_acc` (GPU-accelerated)\n     - `CUDA`: `cublas` is used for NVIDIA GPUs\n     - `ROCM`: `rocBLAS` is used for AMD GPUs\n     - `CUSTOM`: user-provided BLAS API\n\nSome dependencies are bundled as submodules and need not be installed explicitly:\n\n- `Tiled-MM` - cublasXt GEMM replacement, that is also ported to AMD GPUs.\n- `COSTA` - distributed matrix reshuffle and transpose algorithm.\n- `semiprof` - profiling utility\n- `gtest_mpi` - MPI utility wrapper over GoogleTest (unit testing library)\n\n## Using COSMA\n\nTo allow easy integration, COSMA can be used in the following ways:\n- **without changing your code:** if your code already uses the `ScaLAPACK API`, then you can just link to COSMA, before linking to any other library providing `pxgemm` and all `pxgemm` calls will be using COSMA, without the need to change your code at all. To get a feeling of the performance you can expect to get, please have a look at the [pdgemm miniapp](#cosma-pxgemm-wrapper). To see how you can link your code to COSMA `pxgemm`, have a look at the [30 seconds tutorial](#using-cosma-in-30-seconds) on how to do this. 
In this way, we integrated COSMA into CP2K quantum chemistry simulator, which you can read more about in the [production example](#cosma-in-production).\n\n- **adapting your code:** if your code is not using ScaLAPACK, then there are two interfaces that can be used:\n    - **custom layout:** if your matrices are distributed in a custom way, then it is enough to pass the descriptors of your data layout to `multiply_using_layout` function, which will then adapt COSMA to your own layout.\n    - **native COSMA layout:** to get the maximum performance, the native COSMA matrix layout should be used. To get an idea of the performance you can expect to get, please have a look at the [matrix multiplication miniapp](#matrix-multiplication).\n\nThe documentation for the latter option will soon be published here.\n\n## Using COSMA in 30 seconds\n\nFor easy integration, it is enough to build COSMA with ScaLAPACK API and then link your code to COSMA before linking to any other library providing ScaLAPACK `pxgemm`. This way, all `pxgemm` calls will be using COSMA `pxgemm` wrappers. To achieve this, please follow these steps:\n\n1) Build COSMA with ScaLAPACK API:\n```bash\n###############\n# get COSMA\n###############\ngit clone --recursive https://github.com/eth-cscs/COSMA cosma && cd cosma\n\n##############################\n# build and install COSMA\n##############################\nmkdir build && cd build\n\n# set up the compiler, e.g. with:\nexport CC=`which cc`\nexport CXX=`which CC`\n\n# choose BLAS and SCALAPACK versions you want to use\n# COSMA_BLAS can be: MKL, OpenBLAS, CRAY_LIBSCI, CUDA, ROCM, CUSTOM\n# COSMA_SCALAPACK can be MKL, CRAY_LIBSCI, CUSTOM\ncmake -DCOSMA_BLAS=CUDA -DCOSMA_SCALAPACK=MKL -DCMAKE_INSTALL_PREFIX=<installation dir>/cosma ..\nmake -j 8\nmake install\n```\n> !! 
Note the *--recursive* flag !!\n\n2) Link your code to COSMA:\n    - **CPU-only** version of COSMA:\n       - link your code to:\n       > -L<installation dir>/cosma/lib64 -lcosma_pxgemm -lcosma -lcosta_scalapack\n\n       - then link to the BLAS and ScaLAPACK you built COSMA with (see `COSMA_BLAS` and `COSMA_SCALAPACK` flags in cmake):\n       > -L${MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_scalapack_lp64 -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lmkl_blacs_intelmpi_lp64 -lgomp -lpthread -lm\n\n\n   - using **GPU-accelerated** version of COSMA:\n       - link your code to:\n       >-L<installation dir>/cosma/lib64 -lcosma_pxgemm -lcosma -lcosta_scalapack -lTiled-MM\n\n       - link to the GPU backend you built COSMA with (see `COSMA_BLAS` flag in cmake):\n       >-lcublas -lcudart -lrt\n\n       - then link to the ScaLAPACK you built COSMA with (see `COSMA_SCALAPACK` flag in cmake):\n       >-L${MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_scalapack_lp64 -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lmkl_blacs_intelmpi_lp64 -lgomp -lpthread -lm\n\n3) Include headers:\n>-I<installation dir>/cosma/include\n\n## COSMA on Multi-GPU Systems\n\nCOSMA is able to take advantage of fast GPU-to-GPU interconnects on multi-gpu systems. This can be achieved in one of the following ways.\n\n### Using `NCCL/RCCL` Libraries\n\nWhen running `cmake` for COSMA, make sure to specify `-DCOSMA_WITH_NCCL=ON`, e.g. 
by doing:\n```bash\n    # NVIDIA GPUs\n    # this will look for NCCL library in the following environment variables:\n    # - NCCL_ROOT: Base directory where all NCCL components are found\n    # - NCCL_INCLUDE_DIR: Directory where NCCL header is found\n    # - NCCL_LIB_DIR: Directory where NCCL library is found\n    cmake -DCOSMA_BLAS=CUDA -DCOSMA_SCALAPACK=MKL -DCOSMA_WITH_NCCL=ON ..\n\n    # AMD GPUs\n    # this will look for RCCL library in the following environment variables:\n    # - RCCL_ROOT_DIR: Base directory where all RCCL components are found\n    # - RCCL_INCLUDE_DIR: Directory where RCCL header is found\n    # - RCCL_LIB_DIR: Directory where RCCL library is found\n    cmake -DCOSMA_BLAS=ROCM -DCOSMA_SCALAPACK=MKL -DCOSMA_WITH_RCCL=ON ..\n```\n### Using GPU-aware MPI\n\nWhen running `cmake` for COSMA, make sure that GPU-aware MPI is enabled in your environment and specify `-DCOSMA_WITH_GPU_AWARE_MPI=ON` when running cmake for COSMA, e.g. by doing:\n```bash\n    # Before running cmake, make sure that GPU-aware MPI is enabled on your system.\n    # For example, on Cray-systems, this can be done by setting the following environment variables:\n    # - export MPICH_RDMA_ENABLED_CUDA=1\n    # - export MPICH_GPU_SUPPORT_ENABLED=1\n    cmake -DCOSMA_BLAS=CUDA -DCOSMA_SCALAPACK=MKL -DCOSMA_WITH_GPU_AWARE_MPI=ON ..\n```\n\n## COSMA in Production\n\n### CP2K\n\nCOSMA is integrated into the [CP2K](https://www.cp2k.org) quantum chemistry simulator. Since COSMA provides ScaLAPACK API, it is enough to link CP2K to COSMA, without changing CP2K code at all, which makes the integration trivial even if (as in the case of CP2K) the simulation code is written in Fortran.\n\nIn the production run, we ran *Random-Phase Approximation (RPA)* benchmark of 128 water molecules, using the *Resolution of Identity (RI)*. The benchmark was run once on 1024 and once on 128 nodes of the GPU partition on [Piz Daint supercomputer](https://www.cscs.ch/computers/piz-daint/) (Cray XC50). 
Computationally, the most dominant part of this benchmark consists of 46 **tall-and-skinny** dense matrix multiplications, with the parameters shown in the table below:\n\n<p align=\"center\"><img src=\"./docs/cp2k-benchmark.svg\" width=\"80%\"></p>\n\nOn **1024 nodes**, we compared the performance of CP2K using `COSMA` and `Cray-libsci_acc` (version: 19.10.1), both being GPU accelerated, for all dense matrix-matrix multiplications (`pdgemm` routine). As can be seen in the following table, the version with COSMA was approximately **2x faster**.\n\n<p align=\"center\"><img src=\"./docs/cp2k-results-1024.svg\" width=\"60%\"></p>\n\nOn **128 nodes**, we compared the performance of CP2K using the following algorithms for multiplying matrices (`pdgemm` routine):  `MKL` (version: 19.0.1.144), `Cray-libsci` (version: 19.06.1), `Cray-libsci_acc` (version: 19.10.1, GPU accelerated) and `COSMA` (both CPU-only and GPU-accelerated versions) libraries. The version with COSMA was the fastest on both CPU and GPU. The CPU version of COSMA achieved the peak performance, whereas the GPU version achieved more than 65\\% of the peak performance of GPUs. Keep in mind that the peak performance of GPUs assumes the data is already residing on GPUs which is not the case here, since matrices were initially residing on CPU. This is one of the reasons why the peak performance is not achieved with the GPU version. Still, the GPU version of COSMA was **25-27\\%** faster than the second best in this case. The results are summarized in the following table:\n\n<p align=\"center\"><img src=\"./docs/cp2k-results-128.svg\" width=\"95%\"></p>\n\nWith COSMA, even higher speedups are possible, depending on matrix shapes. To illustrate possible performance gains, we also ran different **square matrix** multiplications on the same number of nodes (**=128**) of [Piz Daint supercomputer](https://www.cscs.ch/computers/piz-daint/). 
The block size is `128x128` and the processor grid is also square: `16x16` (2 ranks per node). The performance of COSMA is compared against Intel MKL ScaLAPACK (version: 19.0.1.144). The results on Cray XC50 (GPU-accelerated) and Cray XC40 (CPU-only) are summarized in the following table:\n\n<p align=\"center\"><img src=\"./docs/square-results.svg\" width=\"80%\"></p>\n\nAll the results from this section assumed matrices given in (block-cyclic) ScaLAPACK data layout. However, if the native COSMA layout is used, even higher throughput is possible.\n\n### Julia language\n\nThe [COSMA.jl](https://github.com/haampie/COSMA.jl/) Julia package uses COSMA's C-interface to provide COSMA-based matrix-matrix multiplication for the [DistributedArrays.jl](https://github.com/JuliaParallel/DistributedArrays.jl/) package. A minimal working example to multiply two random matrices looks as follows:\n\n```julia\nusing MPIClusterManagers, DistributedArrays, Distributed\n\nmanager = MPIManager(np = 6)\naddprocs(manager)\nCOSMA.use_manager(manager)\n\n@everywhere using COSMA\n\nA = drand(8000, 8000) * drand(8000, 8000)\n```\n\n## Miniapps\n\n```bash\n# for CPU-only version\nsbatch schedule_miniapp_on_daint_cpu.sh\n# for Hybrid (CPU+GPU) version\nsbatch schedule_miniapp_on_daint_gpu.sh\n```\nThe script will use SLURM to submit a job on 10 nodes. 
The job will run 2 matrix\nmultiplications and output the time COSMA algorithm took.\n\n### Matrix Multiplication\n\nThe project contains a miniapp that produces two random matrices `A` and `B`,\ncomputes their product `C` with the COSMA algorithm and outputs the time of the\nmultiplication.\n\nThe miniapp consists of an executable `./build/miniapp/cosma_miniapp` which can\nbe run with the following command line (assuming we are in the root folder of\nthe project):\n\n```bash\n# set the number of threads to be used by each MPI rank\nexport OMP_NUM_THREADS=18\n# if using CPU version with MKL backend, set MKL_NUM_THREADS as well\nexport MKL_NUM_THREADS=18\n# run the miniapp\nmpirun -np 4 ./build/miniapp/cosma_miniapp -m 1000 -n 1000 -k 1000 -r 2\n```\n\nThe overview of all supported options is given below:\n- `-m (--m_dim)` (default: `1000`): number of rows of matrices `A` and `C`.\n- `-n (--n_dim)` (default: `1000`): number of columns of matrices `B` and `C`.\n- `-k (--k_dim)` (default: `1000`): number of columns of matrix `A` and rows of matrix `B`.\n- `-s (--steps)` (optional): string of triplets divided by comma defining the\n  splitting strategy. Each triplet defines one step of the algorithm. The first\n  character in the triplet defines whether it is a parallel (p) or a sequential\n  (s) step. The second character defines the dimension that is split in this\n  step. The third parameter is an integer which defines the divisor. This\n  parameter can be omitted. In that case the default strategy will be used. An example of a possible value for the upper example: `--steps=sm2,pn2,pk2`.\n- `-r (--n_rep)` (optional, default: `2`): the number of repetitions.\n- `-t (--type)` (optional, default: `double`): data type of matrix entries. Can be one of: `float`, `double`, `zfloat` and `zdouble`. 
The last two correspond to complex numbers.\n- `--test` (optional): if present, the result of COSMA will be verified with the result of the available SCALAPACK.\n- `-h (--help) (optional)`: print available options.\n\n### COSMA pxgemm wrapper\n\nCOSMA also contains a wrapper for ScaLAPACK `pxgemm` calls which offers scalapack interface (pxgemm functions with exactly the same signatures as ScaLAPACK). Running these functions will take care of transforming the matrices between ScaLAPACK and COSMA data layout, perform the multiplication using COSMA algorithm and transform the result back to the specified ScaLAPACK data layout.\n\nThe miniapp consists of an executable `./build/miniapp/pxgemm_miniapp` which can be run as follows (assuming we are in the root folder of the project):\n\n```bash\n# set the number of threads to be used by each MPI rank\nexport OMP_NUM_THREADS=18\n# if using CPU version with MKL backend, set MKL_NUM_THREADS as well\nexport MKL_NUM_THREADS=18\n# run the miniapp\nmpirun -np 4 ./build/miniapp/pxgemm_miniapp -m 1000 -n 1000 -k 1000 \\\n                                            --block_a=128,128 \\\n                                            --block_b=128,128 \\\n                                            --block_c=128,128 \\\n                                            --p_grid=2,2 \\\n                                            --transpose=NN \\\n                                            --type=double \\\n                                            --algorithm=cosma\n```\n\nThe overview of all supported options is given below:\n- `-m (--m_dim)` (default: `1000`): number of rows of matrices `A` and `C`.\n- `-n (--n_dim)` (default: `1000`): number of columns of matrices `B` and `C`.\n- `-k (--k_dim)` (default: `1000`): number of columns of matrix `A` and rows of matrix `B`.\n- `--block_a` (optional, default: `128,128`): 2D-block size for matrix A.\n- `--block_b` (optional, default `128,128`): 2D-block size for matrix B.\n- `--block_c` 
(optional, default `128,128`): 2D-block size for matrix C.\n- `-p (--p_grid)` (optional, default: `1,P`): 2D-processor grid. By default `1xP` where `P` is the total number of MPI ranks.\n- `--transpose` (optional, default: `NN`): transpose/conjugate flags to A and B.\n- `--alpha` (optional, default: 1): alpha parameter in `C = alpha*A*B + beta*C`.\n- `--beta` (optional, default: 0): beta parameter in `C = alpha*A*B + beta*C`.\n- `-r (--n_rep)` (optional, default: 2): number of repetitions.\n- `-t (--type)` (optional, default: `double`): data type of matrix entries. Can be one of: `float`, `double`, `zfloat` and `zdouble`. The last two correspond to complex numbers.\n- `--test` (optional): if present, the result of COSMA will be verified with the result of the available SCALAPACK.\n- `--algorithm` (optional, default: `both`): defines which algorithm (`cosma`, `scalapack` or `both`) to run.\n- `-h (--help) (optional)`: print available options.\n\n## Tunable Parameters\n\n### Parameters Overview\n\nThe overview of tunable parameters, that can be set through environment variables is given in the table below. The default values are given in **bold**.\n\nENVIRONMENT VARIABLE | POSSIBLE VALUES | DESCRIPTION\n| :------------------- | :------------------- |:------------------- |\n`COSMA_OVERLAP_COMM_AND_COMP` | ON, **OFF** | If enabled, communication and computation might be overlapped, depending on the built-in heuristics.\n`COSMA_ADAPT_STRATEGY` | **ON**, OFF | If enabled, COSMA will try to natively use the scalapack layout, without transforming to the COSMA layout.  Used only in the pxgemm wrapper.\n`COSMA_CPU_MAX_MEMORY` | integer (`size_t`), by default: **infinite** | CPU memory limit in megabytes per MPI process (rank). Allowing too little memory might reduce the performance.\n`COSMA_GPU_MEMORY_PINNING` | **ON**, OFF | If enabled, COSMA will pin parts of the host memory to speed up CPU-GPU memory transfers. 
Used only in the GPU backend.\n`COSMA_GPU_MAX_TILE_M`, `COSMA_GPU_MAX_TILE_N`, `COSMA_GPU_MAX_TILE_K` | integer (`size_t`), by default: **5000** | Tile sizes for each dimension, that are used to pipeline the local CPU matrices to GPU. `K` refers to the shared dimension and `MxN` refer to the dimensions of matrix `C`\n`COSMA_GPU_STREAMS` | integer (`size_t`), by default: **2** | The number of GPU streams that each rank should use.\n`COSMA_MEMORY_POOL_AMORTIZATION` | real (`double`), by default **1.2** | The growth factor for the memory pool. If equal to 1.2, then 1.2x the requested size is allocated (thus, 20% more than needed). Higher values better amortize the cost of the memory pool resizing which can occur when the algorithm is invoked for different matrix sizes. However, higher amortization values also mean that potentially more memory is allocated than used which can be a problem when the memory resource is tight.\n`COSMA_MIN_LOCAL_DIMENSION` | integer (`size_t`), by default: **200** | If any matrix dimension becomes smaller than this threshold (after splitting the matrices among the available MPI ranks), then the actual number of ranks is reduced so that all matrix dimensions stay at or above this limit.\n`COSMA_DIM_THRESHOLD` | integer (`size_t`), by default: **0** | In SCALAPACK wrappers, if any matrix dimension is less than this threshold, the problem is considered too small and is dispatched to SCALAPACK for computation. This only affects the SCALAPACK wrappers.\n`COSMA_CPU_MEMORY_ALIGNMENT` | integer (`size_t`), by default: **0** | The number of bytes to which all cpu (host) buffers will be aligned.\n\n\nThese are all optional parameters. 
They are used in runtime and hence changing any of those does not require the code to be recompiled.\n\nWe further discuss in detail how to set the limits for both CPU and GPU memory that COSMA is allowed to use.\n\n### Controlling GPU memory\n\nControlling how much GPU memory COSMA is allowed to use can be done by specifying the tile dimensions as:\n```bash\nexport COSMA_GPU_MAX_TILE_M=5000\nexport COSMA_GPU_MAX_TILE_N=5000\nexport COSMA_GPU_MAX_TILE_K=5000\n```\nwhere `K` refers to the shared dimension and `MxN` refer to the dimensions of matrix `C`. By default, all tiles are square and have dimensions `5000x5000`.\n\nThese are only the maximum tiles and the actual tile sizes that will be used might be less, depending on the problem size. These variables are only used in the GPU backend for pipelining the local matrices to GPUs.\n\nIt is also possible to specify the number of GPU streams:\n```bash\nexport COSMA_GPU_STREAMS=2\n```\n\nThe values given here are the default values.\n\nThe algorithm will then require device memory for at most this many elements:\n```cpp\nnum_streams * (tile_m * tile_k + tile_k * tile_n + tile_m * tile_n)\n```\n\nTherefore, by changing the values of these variables, it is possible to control the usage of GPU memory.\n\n### Controlling CPU memory\n\nIn case the available CPU memory is a scarce resource, it is possible to set the CPU memory limit to COSMA, by exporting the following environment variable:\n```bash\nexport COSMA_CPU_MAX_MEMORY=1024 # in megabytes per MPI process (rank)\n```\nwhich will set the upper limit [in MB] on the memory that each MPI process (rank) is allowed to use. This might, however, reduce the performance.\n\nIn case the algorithm is not able to perform the multiplication within the given memory range, a `runtime_error` will be thrown.\n\n> This parameter is still in the testing phase!\n\n## Profiling\n\nUse `-DCOSMA_WITH_PROFILING=ON` to instrument the code. 
We use the profiler, called `semiprof`, written by Benjamin Cumming (https://github.com/bcumming).\n\nRunning the miniapp locally (from the project root folder) with the following command:\n\n```bash\nmpirun --oversubscribe -np 4 ./build/miniapp/cosma_miniapp -m 1000 -n 1000 -k 1000 -P 4\n```\n\nProduces the following output from rank 0:\n\n```\nMatrix dimensions (m, n, k) = (1000, 1000, 1000)\nNumber of processors: 4\n\n_p_ REGION                     CALLS      THREAD        WALL       %\n_p_ total                          -       0.110       0.110   100.0\n_p_   multiply                     -       0.098       0.098    88.7\n_p_     computation                2       0.052       0.052    47.1\n_p_     communication              -       0.046       0.046    41.6\n_p_       copy                     3       0.037       0.037    33.2\n_p_       reduce                   3       0.009       0.009     8.3\n_p_     layout                    18       0.000       0.000     0.0\n_p_   preprocessing                3       0.012       0.012    11.3\n```\n\nThe percentage is always relative to the first level above. 
All time measurements are in seconds.\n\n## Authors\n\n- Grzegorz Kwasniewski, Marko Kabic, Maciej Besta, Joost VandeVondele, Raffaele Solca, Torsten Hoefler\n\nCite as:\n```\n@inproceedings{cosma_algorithm_2019,\n  title={Red-blue pebbling revisited: Near optimal parallel matrix-matrix multiplication},\n  author={Kwasniewski, Grzegorz and Kabi{\\'c}, Marko and Besta, Maciej and VandeVondele, Joost and Solc{\\`a}, Raffaele and Hoefler, Torsten},\n  booktitle={Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis},\n  pages={1--22},\n  year={2019}\n}\n```\n\n## Questions?\n\nFor questions, feel free to contact us, and we will soon get back to you:\n- For questions regarding the implementation, contact Marko Kabic (marko.kabic@inf.ethz.ch), Teodor Nikolov (tnikolov@cscs.ch) or Simon Pintarelli (simon.pintarelli@cscs.ch).\n- For questions regarding the theory, contact Grzegorz Kwasniewski (gkwasnie@inf.ethz.ch).\n\n> If you need any help with the integration of COSMA into your library, we will be more than happy to help you!\n\n## Acknowledgements\n\nThis work was funded in part by:\n\n<img align=\"left\" height=\"50\" src=\"./docs/eth-logo.svg\"> | [**ETH Zurich**](https://ethz.ch/en.html)**: Swiss Federal Institute of Technology in Zurich**\n| :------------------- | :------------------- |\n<img align=\"left\" height=\"50\" src=\"./docs/cscs-logo.jpg\"> | [**CSCS**](https://www.cscs.ch)**: Swiss National Supercomputing Centre**\n<img align=\"left\" height=\"50\" src=\"./docs/pasc-logo.png\"> | [**PASC**](https://www.pasc-ch.org/)**: Platform for Advanced Scientific Computing**\n<img align=\"left\" height=\"50\" src=\"./docs/erc-logo.png\"> | [**ERC**](https://erc.europa.eu): **European Research Council** (Horizon2020, grant agreement DAPP, No.678880)\n<img align=\"left\" height=\"50\" src=\"./docs/max-logo.jpg\"> | [**MaX**](http://www.max-centre.eu): **Materials design at the Exascale** (Horizon2020, grant 
agreement MaX CoE, No. 824143.)\n\nWe thank Thibault Notargiacomo, Sam Yates, Benjamin Cumming and Simon Pintarelli for their generous contribution to the project: great ideas, useful advice and fruitful discussions.\n"
  },
  {
    "path": "_config.yml",
    "content": "theme: jekyll-theme-slate"
  },
  {
    "path": "benchmarks/CMakeLists.txt",
    "content": "include(find_cuda_version)\n\n################\n#  Build test  #\n################\nset(executables \"ubench-allgather\"\n                \"allgather-volume\"\n                \"sendrecv\"\n                \"reduce-scatter\"\n                \"blocking_vs_non_blocking\"\n                \"dgemm_perf_model\")\n\n# if (${COSMA_BLAS} STREQUAL \"MKL\")\n#     list(APPEND executables \"transpose\")\n# endif()\n\nforeach(exec ${executables})\n    add_executable(${exec} \"${exec}.cpp\")\n    target_link_libraries(${exec} cosma)\nendforeach()\n\nif (COSMA_GPU_BACKEND MATCHES \"CUDA\")\n  find_cuda_version()\n  # check if cuda toolkit version >= 10.1\n  # which is needed for cublasLt (used in the benchmark)\n  if (CUDA_TOOLKIT_MAJOR_VERSION GREATER 10 OR\n      (CUDA_TOOLKIT_MAJOR_VERSION EQUAL 10 AND CUDA_TOOLKIT_MINOR_VERSION GREATER_EQUAL 1))\n    add_executable(gpu_gemm_cublas \"gpu_gemm_cublas.cpp\")\n    target_link_libraries(gpu_gemm_cublas cosma Tiled-MM::Tiled-MM cublasLt cublas)\n    target_compile_definitions(gpu_gemm_cublas PRIVATE COSMA_HAVE_GPU)\n  endif()\nendif()\n"
  },
  {
    "path": "benchmarks/allgather-volume.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/timer.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <cctype>\n#include <chrono>\n#include <cstdlib>\n#include <iostream>\n#include <string>\n#include <vector>\n\nusing namespace cosma;\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int base_size = 1 << 25;\n    int local_size = base_size;\n    int total_size = P * base_size;\n\n    std::vector<double> in(local_size);\n    std::vector<double> result(total_size);\n\n    const int n_rep = 10;\n\n    {\n        Timer time(n_rep, \"MPI_Allgather\");\n        for (int i = 0; i < n_rep; ++i) {\n            MPI_Allgather(in.data(),\n                          local_size,\n                          MPI_DOUBLE,\n                          result.data(),\n                          local_size,\n                          MPI_DOUBLE,\n                          MPI_COMM_WORLD);\n        }\n    }\n\n    MPI_Finalize();\n    return 0;\n}\n"
  },
  {
    "path": "benchmarks/bcast-volume.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/timer.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <cctype>\n#include <cstdlib>\n#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n\nusing namespace cosma;\n\nint main( int argc, char **argv ) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int base_size = 1 << 25;\n    int local_size = base_size;\n    int total_size = P * base_size;\n\n    std::vector<double> in(local_size);\n    std::vector<double> result(total_size);\n\n    const int n_rep = 10;\n\n    {\n        Timer time(n_rep, \"MPI_Allgather\");\n        for (int i = 0; i < n_rep; ++i) {\n            MPI_Allgather(in.data(), local_size, MPI_DOUBLE, result.data(),\n                    local_size, MPI_DOUBLE, MPI_COMM_WORLD);\n        }\n    }\n\n    MPI_Finalize();\n    return 0;\n}\n"
  },
  {
    "path": "benchmarks/blocking_vs_non_blocking.cpp",
    "content": "#include <cosma/local_multiply.hpp>\n\n#include <mpi.h>\n\n#include <chrono>\n#include <cmath>\n#include <iostream>\n#include <tuple>\n#include <unistd.h>\n#include <vector>\n\nclass Timer {\n  public:\n    using time_point =\n        std::chrono::time_point<std::chrono::high_resolution_clock>;\n\n    int n_rep_;\n    std::string region;\n    MPI_Comm comm_;\n    time_point start;\n\n    Timer(int n_rep, std::string reg = \"\", MPI_Comm comm = MPI_COMM_WORLD)\n        : n_rep_(n_rep)\n        , region(reg)\n        , comm_(comm) {\n        MPI_Barrier(comm);\n        start = std::chrono::high_resolution_clock::now();\n    }\n\n    ~Timer() {\n        auto finish = std::chrono::high_resolution_clock::now();\n        std::chrono::duration<double> elapsed = finish - start;\n        auto time =\n            std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)\n                .count();\n        long long max_time, min_time, sum_time;\n        MPI_Reduce(&time, &max_time, 1, MPI_LONG_LONG, MPI_MAX, 0, comm_);\n        MPI_Reduce(&time, &min_time, 1, MPI_LONG_LONG, MPI_MIN, 0, comm_);\n        MPI_Reduce(&time, &sum_time, 1, MPI_LONG_LONG, MPI_SUM, 0, comm_);\n        int rank, size;\n        MPI_Comm_rank(comm_, &rank);\n        MPI_Comm_size(comm_, &size);\n        if (rank == 0) {\n            std::cout << region << \" MIN TIME [ms]: \" << 1.0 * min_time / n_rep_\n                      << std::endl;\n            std::cout << region << \" MAX TIME [ms]: \" << 1.0 * max_time / n_rep_\n                      << std::endl;\n            std::cout << region\n                      << \" AVG TIME [ms]: \" << 1.0 * sum_time / (n_rep_ * size)\n                      << std::endl;\n            std::cout << \"\\n\";\n        }\n    }\n};\n\nstd::pair<int, int> group_and_offset(int P, int divisor, int rank) {\n    int subset_size = P / divisor;\n    int subint_index = rank / subset_size;\n    int offset = rank - subint_index * subset_size;\n    return 
{subint_index, offset};\n}\n\nvoid solve(double *A, double *B, double *C, int m, int n, int k) {\n    // multiply square matrices with dimensions sqrt(local_size)\n    auto ctx = cosma::make_context<double>();\n    bool copy_c_back = true;\n    cosma::local_multiply(ctx, A, B, C, m, n, k, 1.0, 0.0, copy_c_back);\n}\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int divisor = 2;\n    int m = 5000;\n    int k = 2000;\n    int n = 2000;\n    int n_iter = 3;\n    size_t local_size = k * n / divisor;\n    float waiting_time = 0.7f;\n    std::vector<double> local_buffer(local_size);\n    std::vector<double> global_buffer(local_size * divisor);\n\n    std::vector<double> a(m / divisor * k);\n    std::vector<double> b(k * n);\n    std::vector<double> c(m / divisor * n);\n\n    // initialize dgemm\n    for (int i = 0; i < 5; ++i) {\n        solve(a.data(), b.data(), c.data(), m / divisor, n / divisor, k);\n    }\n\n    {\n        Timer dgemm_small(10, \"dgemm subproblem\");\n        for (int i = 0; i < 10; ++i) {\n            solve(a.data(), b.data(), c.data(), m / divisor, n / divisor, k);\n        }\n    }\n    {\n        Timer dgemm_large(10, \"dgemm large problem\");\n        for (int i = 0; i < 10; ++i) {\n            solve(a.data(), b.data(), c.data(), m / divisor, n, k);\n        }\n    }\n\n    int gp, off;\n    std::tie(gp, off) = group_and_offset(P, divisor, rank);\n    MPI_Comm subcom;\n    MPI_Comm_split(MPI_COMM_WORLD, off, gp, &subcom);\n\n    MPI_Request req[2 * (divisor - 1)];\n\n    int reqi = 0;\n    for (int i = 0; i < divisor; ++i) {\n        if (i != gp) {\n            int offset = i * local_size;\n\n            MPI_Recv_init(global_buffer.data() + offset,\n                          local_size,\n                          MPI_DOUBLE,\n                          i,\n                          0,\n                          
subcom,\n                          &req[reqi]);\n            MPI_Send_init(local_buffer.data(),\n                          local_size,\n                          MPI_DOUBLE,\n                          i,\n                          0,\n                          subcom,\n                          &req[divisor - 1 + reqi]);\n            reqi++;\n        }\n    }\n\n    {\n        Timer timer_async(1, \"asynchronous\");\n        MPI_Startall(2 * (divisor - 1), req);\n\n        // do the work\n        solve(a.data(), b.data(), c.data(), m / divisor, n / divisor, k);\n        // usleep(waiting_time * 1e6);\n\n        for (int i = 0; i < divisor - 1; ++i) {\n            int idx = -1;\n            MPI_Waitany(divisor - 1, req, &idx, MPI_STATUS_IGNORE);\n            // if (idx >= rank) idx++;\n            solve(a.data(), b.data(), c.data(), m / divisor, n / divisor, k);\n            // usleep(waiting_time * 1e6);\n        }\n\n        MPI_Waitall(divisor - 1, req + divisor - 1, MPI_STATUSES_IGNORE);\n    }\n\n    MPI_Barrier(MPI_COMM_WORLD);\n\n    {\n        Timer timer_sync(1, \"synchronous\");\n\n        MPI_Allgather(local_buffer.data(),\n                      local_size,\n                      MPI_DOUBLE,\n                      global_buffer.data(),\n                      local_size,\n                      MPI_DOUBLE,\n                      subcom);\n\n        solve(a.data(), b.data(), c.data(), m / divisor, n, k);\n        // usleep(1e6 * divisor * waiting_time);\n    }\n\n    MPI_Comm_free(&subcom);\n\n    MPI_Finalize();\n}\n"
  },
  {
    "path": "benchmarks/dgemm_perf_model.cpp",
    "content": "#include <cosma/local_multiply.hpp>\n#include <cosma/timer.hpp>\n\n#include <algorithm>\n#include <chrono>\n#include <vector>\n\nusing namespace cosma;\n\ndouble sq_score(double a, double b) {\n    double result = ((1.0 * a / b) + (1.0 * b / a)) /\n                    (2.0 * std::max(1.0 * a / b, 1.0 * b / a));\n    // double result = std::min(a, b) / std::max(a, b);\n    return result;\n}\n\ndouble score(double m, double n, double k) {\n    double score_a = sq_score(m, k);\n    double score_b = sq_score(k, n);\n    double score_c = sq_score(m, n);\n    double result = score_a * score_b * score_c;\n    return result;\n}\n\ndouble throughput(double m, double n, double k, double time) {\n    return m * n * k * 2 / (1e6 * time);\n}\n\nstruct problem {\n    int m;\n    int n;\n    int k;\n\n    double time;\n    double score;\n\n    double tps;\n\n    problem() = default;\n    problem(int mm, int nn, int kk, double tt, double ss, double thr)\n        : m(mm)\n        , n(nn)\n        , k(kk)\n        , time(tt)\n        , score(ss)\n        , tps(thr) {}\n};\n\nint main(int argc, char **argv) {\n    std::vector<double> a;\n    std::vector<double> b;\n    std::vector<double> c;\n\n    int min_m = 1000;\n    int min_n = 1000;\n    int min_k = 1000;\n\n    int max_m = 50000;\n    int max_n = 1000;\n    int max_k = 1000;\n\n    int step_m = 500;\n    int step_n = 500;\n    int step_k = 500;\n\n    int n_rep = 2;\n\n    auto ctx = cosma::make_context<double>();\n\n    bool copy_c_back = true;\n\n    // run random dgemm in order to initialize it\n    for (int i = 0; i < n_rep; ++i) {\n        a = std::vector<double>(min_m * min_m);\n        b = std::vector<double>(min_m * min_m);\n        c = std::vector<double>(min_m * min_m);\n\n        local_multiply(\n            ctx, a.data(), b.data(), c.data(), min_m, min_m, min_m, 1.0, 0.0, copy_c_back);\n    }\n\n    std::vector<problem> timings;\n\n    for (int m = min_m; m <= max_m; m += step_m) {\n        for (int 
n = min_n; n <= max_n; n += step_n) {\n            for (int k = min_k; k <= max_k; k += step_k) {\n                auto start = std::chrono::high_resolution_clock::now();\n                for (int rep = 0; rep < n_rep; ++rep) {\n                    a = std::vector<double>(m * k);\n                    b = std::vector<double>(k * n);\n                    c = std::vector<double>(m * n);\n\n                    local_multiply(\n                        ctx, a.data(), b.data(), c.data(), m, n, k, 1.0, 0.0, copy_c_back);\n                }\n                auto finish = std::chrono::high_resolution_clock::now();\n                auto time =\n                    std::chrono::duration_cast<std::chrono::milliseconds>(\n                        finish - start)\n                        .count();\n                time /= 1.0 * n_rep;\n                double mul_score = score(m, n, k);\n                double tps = throughput(m, n, k, time);\n                problem prob(m, n, k, time, mul_score, tps);\n                timings.push_back(prob);\n            }\n        }\n    }\n\n    std::sort(timings.begin(),\n              timings.end(),\n              [](const problem &lhs, const problem &rhs) {\n                  return lhs.tps < rhs.tps;\n              });\n\n    for (auto &problem : timings) {\n        std::cout << problem.m << \" \" << problem.tps << \" \" << problem.score\n                  << std::endl;\n        // std::cout << \"(\" << problem.m << \", \" << problem.n << \", \" <<\n        // problem.k << \"), tps = \" << problem.tps << \", score = \" <<\n        // problem.score << std::endl;\n    }\n    return 0;\n}\n"
  },
  {
    "path": "benchmarks/gpu_gemm_cublas.cpp",
    "content": "#include <algorithm>\n#include <cosma/local_multiply.hpp>\n\n#include <cublasXt.h>\n#include <cublasLt.h>\n#include <cuda_runtime_api.h>\n#include <Tiled-MM/util.hpp>\n#include <Tiled-MM/mm_handle.hpp>\n#include <random>\n\n#include <chrono>\n#include <vector>\n#include <iostream>\n\ntemplate <typename T>\nvoid fill_matrix(T* ptr, size_t size) {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_real_distribution<T> dist(10.0); // distribution\n\n    for (unsigned i = 0; i < size; ++i) {\n        ptr[i] = T{dist(rng)};\n    }\n}\n\nstd::vector<long> tiled_mm_dgemm(int n_iter, int m, int n, int k) {\n    auto gpu_ctx = gpu::make_context<double>(2, 4000, 4000, 4000);\n\n    std::vector<double> aa(m * k);\n    std::vector<double> bb(k * n);\n    std::vector<double> cc(m * n);\n\n    double *a = aa.data();\n    double *b = bb.data();\n    double *c = cc.data();\n\n    double alpha = 1.0;\n    double beta = 0.0;\n\n    std::vector<long> times(n_iter);\n    for (int i = 0; i < n_iter; ++i) {\n        fill_matrix(a, aa.size());\n        fill_matrix(b, bb.size());\n        if (beta > 0) {\n            fill_matrix(c, cc.size());\n        }\n\n        // perform dgemm\n        auto start = std::chrono::steady_clock::now();\n        cosma::local_multiply(gpu_ctx.get(), a, b, c, m, n, k, alpha, beta);\n        auto end = std::chrono::steady_clock::now();\n\n        times[i] = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n    }\n\n    std::sort(times.begin(), times.end());\n    return times;\n}\n\nstd::vector<long> cublasXt_dgemm(int n_iter, int m, int n, int k) {\n    auto status=\n    cudaSetDevice(0);\n    gpu::check_runtime_status(status);\n\n    cublasXtHandle_t handle;\n    auto cublas_status = cublasXtCreate(&handle);\n    gpu::check_blas_status(cublas_status);\n    int devices[1] = {0};\n    
cublasXtDeviceSelect(handle, 1, devices);\n    // cublasXtSetCpuRoutine(handle, CUBLASXT_GEMM, CUBLASXT_DOUBLE, (void*)(&dgemm_));\n    // cublasXtSetCpuRatio(handle, CUBLASXT_GEMM, CUBLASXT_DOUBLE, 0.2);\n    // cublasXtSetPinningMemMode(handle, CUBLASXT_PINNING_ENABLED);\n    // cublasXtSetBlockDim(handle, 4000);\n\n    std::vector<double> aa(m * k);\n    std::vector<double> bb(k * n);\n    std::vector<double> cc(m * n);\n\n    double *a = aa.data();\n    double *b = bb.data();\n    double *c = cc.data();\n\n    double alpha = 1.0;\n    double beta = 0.0;\n\n    std::vector<long> times(n_iter);\n\n    for (int i = 0; i < n_iter; ++i) {\n        fill_matrix(a, aa.size());\n        fill_matrix(b, bb.size());\n        if (beta > 0) {\n            fill_matrix(c, cc.size());\n        }\n\n        // perform dgemm\n        auto start = std::chrono::steady_clock::now();\n        auto status = cublasXtDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,\n            m, n, k, &alpha, a, m, b, k, &beta, c, m);\n        gpu::check_blas_status(status);\n        cudaDeviceSynchronize();\n        auto end = std::chrono::steady_clock::now();\n\n        times[i] = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n    }\n\n    std::sort(times.begin(), times.end());\n\n    // finalization \n    if (handle)\n        cublasXtDestroy(handle);\n\n    return times;\n}\n\n/*\n// cublasLt assumes device pointers\nstd::vector<long> cublasLt_dgemm(int n_iter, int m, int n, int k) {\n    auto runtime_status=\n    cudaSetDevice(0);\n    gpu::check_runtime_status(runtime_status);\n\n    cublasLtHandle_t handle;\n    auto status = cublasLtCreate(&handle);\n    gpu::check_blas_status(status);\n    // int devices[1] = {0};\n    // cublasLtDeviceSelect(handle, 1, devices);\n\n    // std::vector<double> aa(m * k);\n    // std::vector<double> bb(k * n);\n    // std::vector<double> cc(m * n);\n\n    double *a = gpu::malloc_device<double>(m * k);\n    double *b = 
gpu::malloc_device<double>(k * n);\n    double *c = gpu::malloc_device<double>(m * n);\n\n    double alpha = 1.0;\n    double beta = 0.0;\n\n    std::size_t workspaceSize = 4000;\n    std::size_t workspaceSizeBytes = workspaceSize * sizeof(double);\n    auto workspace = gpu::malloc_device<double>(workspaceSize);\n\n    auto transa = CUBLAS_OP_N;\n    auto transb = CUBLAS_OP_N;\n\n    cublasLtMatmulDesc_t operationDesc = nullptr;\n    cublasLtMatrixLayout_t Adesc = nullptr;\n    cublasLtMatrixLayout_t Bdesc = nullptr;\n    cublasLtMatrixLayout_t Cdesc = nullptr;\n    cublasLtMatmulPreference_t preference = nullptr;\n\n    int returnedResults = 0;\n    cublasLtMatmulHeuristicResult_t heuristicResult = {};\n\n    status = cublasLtMatmulDescCreate(&operationDesc, CUDA_R_64F);\n    gpu::check_blas_status(status);\n\n    status = cublasLtMatmulDescSetAttribute(operationDesc, \n            CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa));\n    gpu::check_blas_status(status);\n    status = cublasLtMatmulDescSetAttribute(operationDesc, \n            CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transb));\n    gpu::check_blas_status(status);\n\n    status = cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_64F, m, k, m);\n    gpu::check_blas_status(status);\n    status = cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_64F, k, n, k);\n    gpu::check_blas_status(status);\n    status = cublasLtMatrixLayoutCreate(&Cdesc, CUDA_R_64F, m, n, m);\n    gpu::check_blas_status(status);\n\n    std::cout << \"Created matrix layouts.\" << std::endl;\n\n    status = cublasLtMatmulPreferenceCreate(&preference);\n    gpu::check_blas_status(status);\n\n    status = cublasLtMatmulPreferenceSetAttribute(\n        preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, \n        &workspaceSizeBytes, sizeof(workspaceSizeBytes));\n    gpu::check_blas_status(status);\n\n    std::cout << \"Set up preferences.\" << std::endl;\n\n    status = cublasLtMatmulAlgoGetHeuristic(\n        handle, operationDesc, Adesc, 
Bdesc, Cdesc, Cdesc, \n        preference, 1, &heuristicResult, &returnedResults);\n    gpu::check_blas_status(status);\n\n    if (returnedResults == 0) {\n        std::cout << \"No algorithm was returned.\" << std::endl;\n        status = CUBLAS_STATUS_NOT_SUPPORTED;\n        gpu::check_blas_status(status);\n    }\n\n    std::cout << \"Chose the algorithm.\" << std::endl;\n\n    std::vector<long> times(n_iter);\n\n    for (int i = 0; i < n_iter; ++i) {\n        // fill_matrix(a, m * k);\n        // fill_matrix(b, k * n);\n        // if (beta > 0) {\n        //     fill_matrix(c, m * n);\n        // }\n\n        // perform dgemm\n        auto start = std::chrono::steady_clock::now();\n        status = cublasLtMatmul(handle,\n                               operationDesc,\n                               &alpha,\n                               a,\n                               Adesc,\n                               b,\n                               Bdesc,\n                               &beta,\n                               c,\n                               Cdesc,\n                               c,\n                               Cdesc,\n                               &heuristicResult.algo,\n                               workspace,\n                               workspaceSizeBytes,\n                               0);\n        gpu::check_blas_status(status);\n        cudaDeviceSynchronize();\n        auto end = std::chrono::steady_clock::now();\n\n        times[i] = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n    }\n\n    std::sort(times.begin(), times.end());\n\n    // finalization \n    if (handle)\n        cublasLtDestroy(handle);\n    // Descriptors are no longer needed as all GPU work was already\n   // enqueued.\n   if (preference) \n       status = cublasLtMatmulPreferenceDestroy(preference);\n   if (Cdesc) \n       status = cublasLtMatrixLayoutDestroy(Cdesc);\n   if (Bdesc) \n       status = 
cublasLtMatrixLayoutDestroy(Bdesc);\n   if (Adesc) \n       status = cublasLtMatrixLayoutDestroy(Adesc);\n   if (operationDesc) \n       status = cublasLtMatmulDescDestroy(operationDesc);\n    gpu::check_blas_status(status);\n\n    return times;\n}\n*/\n\nint main(int argc, char* argv[]) {\n    // std::vector<int> dims = {500, 1000, 2000, 4000, 8000, 16000, 32000};\n    std::vector<int> dims = {4000, 8000, 12000, 16000, 20000, 24000, 28000, 32000};\n    int n_iter = 2;\n\n    std::vector<long> times(n_iter);\n\n    for (const int& dim : dims) {\n        std::cout << \"Dimension = \" << dim << std::endl;\n        /*\n        // cublasLt\n        times = cublasLt_dgemm(n_iter, dim, dim, dim);\n        std::cout << \"cublasLt: \";\n        for (const auto& time : times) {\n            std::cout << time << \", \";\n        }\n        std::cout << std::endl;\n        */\n\n        // cublasXt\n        times = cublasXt_dgemm(n_iter, dim, dim, dim);\n        std::cout << \"cublasXt: \";\n        for (const auto& time : times) {\n            std::cout << time << \", \";\n        }\n        if (times.size()) {\n            std::cout << \"highest throughput [Gflop/s]: \" << 2.0*dim*dim*dim/(1e6*times[0]);\n        }\n        std::cout << std::endl;\n\n        // tiled-mm\n        times = tiled_mm_dgemm(n_iter, dim, dim, dim);\n        std::cout << \"Tiled-MM: \";\n        for (const auto& time : times) {\n            std::cout << time << \", \";\n        }\n        if (times.size()) {\n            std::cout << \"highest throughput [Gflop/s]: \" << 2.0*dim*dim*dim/(1e6*times[0]);\n        }\n        std::cout << std::endl;\n    }\n\n}\n"
  },
  {
    "path": "benchmarks/gpu_gemm_libsci_acc.cpp",
    "content": "\n#include <libsci_acc.h>\n\n#include <chrono>\n#include <vector>\n#include <iostream>\n\nlong libsci_acc_dgemm(int m, int n, int k) {\n    double* a, *b, *c;\n\n    double alpha = 1.0;\n    double beta = 0.0;\n\n    libsci_acc_HostAlloc((void**)&a, sizeof(double)*m*k);\n    libsci_acc_HostAlloc((void**)&b, sizeof(double)*k*n);\n    libsci_acc_HostAlloc((void**)&c, sizeof(double)*m*n);\n\n    // perform dgemm\n    auto start = std::chrono::steady_clock::now();\n    dgemm('n', 'n', m, n, k, alpha, a, m, b, k, beta, c, m);\n    auto end = std::chrono::steady_clock::now();\n\n    libsci_acc_HostFree(a);\n    libsci_acc_HostFree(b);\n    libsci_acc_HostFree(c);\n\n    return std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n}\n\nint main(int argc, char* argv[]) {\n    // initialization\n    libsci_acc_init();\n\n    // std::vector<int> dims = {500, 1000, 2000, 4000, 8000, 16000, 32000};\n    std::vector<int> dims = {32000};\n    int n_iter = 1;\n\n    for (const int& dim : dims) {\n        std::cout << \"Dimension = \" << dim << std::endl;\n        double t_avg_libsci = 0;\n\n        for (int i = 0; i < n_iter+1; ++i) {\n            long t_libsci = libsci_acc_dgemm(dim, dim, dim);\n\n            if (i == 0) continue;\n\n            t_avg_libsci += t_libsci;\n        }\n        std::cout << \"libsci average time [ms]: \" << 1.0*t_avg_libsci/n_iter << std::endl;\n    }\n    libsci_acc_finalize();\n}\n"
  },
  {
    "path": "benchmarks/reduce-scatter.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/timer.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <cctype>\n#include <cstdlib>\n#include <iostream>\n#include <string>\n#include <vector>\n#include <chrono>\n\nusing namespace cosma;\n\nint main( int argc, char **argv ) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    const int n_rep = 2;\n    int scaling_factor = P;\n\n    /*\n    for (int i = -10; i <= 10; ++i) {\n        int dim = (scaling_factor+i)*P;\n        int block_size = (dim/P) * dim;\n        int total_size = block_size * P;\n\n        if (rank == 0)\n            std::cout << \"dim = \" << dim << std::endl;\n\n        std::vector<double> in(total_size);\n        std::vector<double> result(block_size);\n        MPI_Request reqs[2];\n\n        {\n            Timer time(n_rep, \"MPI_Reduce_scatter_block\");\n            for (int i = 0; i < n_rep; ++i) {\n                MPI_Ireduce_scatter_block(in.data(),\n                                   result.data(),\n                                   block_size/2,\n                                   MPI_DOUBLE,\n                                   MPI_SUM,\n                                   MPI_COMM_WORLD,\n                                   &reqs[0]);\n                MPI_Ireduce_scatter_block(in.data(),\n                                   result.data(),\n                                   block_size/2,\n                                   MPI_DOUBLE,\n                                   MPI_SUM,\n                                   MPI_COMM_WORLD,\n                                   &reqs[1]);\n                MPI_Waitall(2, &reqs[0], MPI_STATUSES_IGNORE);\n            }\n        }\n    }\n    */\n    int dim = 17408;\n    int block_size = (dim/P) * dim;\n    int total_size = block_size * P;\n    std::vector<double> in(total_size);\n    std::vector<double> result(block_size);\n    {\n        Timer 
time(n_rep, \"MPI_Reduce_scatter_block\");\n        for (int i = 0; i < n_rep; ++i) {\n            MPI_Reduce_scatter_block(in.data(),\n                               result.data(),\n                               block_size,\n                               MPI_DOUBLE,\n                               MPI_SUM,\n                               MPI_COMM_WORLD);\n        }\n    }\n\n    MPI_Finalize();\n    return 0;\n}\n"
  },
  {
    "path": "benchmarks/run_ubench.sh",
    "content": "n_nodes_list=(2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36)\n\nrun() {\n\n    n_nodes=$1\n    srun -N $n_nodes -n $n_nodes ./tests/ubench/ubench-allgather\n}\n\nIFS=\n\nfor n_nodes in ${n_nodes_list[@]}\ndo \n    echo \"NODES = \"$n_nodes\n    output=$(run $n_nodes)\n    avg_time_v=$(echo $output | awk '/MPI_Allgatherv AVG TIME/ {print $5}')\n    avg_time=$(echo $output | awk '/MPI_Allgather AVG TIME/ {print $5}')\n    echo $output\n    echo \"avg_time_v = \"$avg_time_v\n    echo \"avg_time = \"$avg_time\n    echo $avg_time_v >> \"allgather_v.txt\"\n    echo $avg_time >> \"allgather.txt\"\ndone\n"
  },
  {
    "path": "benchmarks/scalapack_transformer.cpp",
    "content": "#include <cosma/math_utils.hpp>\n\n#include <grid2grid/transform.hpp>\n#include <grid2grid/scalapack_layout.hpp>\n#include <mpi.h>\n\n#include <iostream>\n#include <iomanip>\n#include <string>\n#include <fstream>\n#include <sstream>\n#include <chrono>\n#include <array>\n#include <numeric>\n\nusing namespace grid2grid;\n\nextern \"C\" {\n    /* Cblacs declarations */\n    void Cblacs_pinfo(int*, int*);\n    void Cblacs_get(int, int, int*);\n    void Cblacs_gridinit(int*, const char*, int, int);\n    void Cblacs_pcoord(int, int, int*, int*);\n    void Cblacs_gridexit(int);\n    void Cblacs_barrier(int, const char*);\n\n    int numroc_(int*, int*, int*, int*, int*);\n\n    void pdgemr2d_(int *m, int *n,\n            double *a, int *ia, int *ja, int *desca,\n            double *b, int *ib, int *jb, int *descb,\n            int* ictxt);\n\n    void descinit_(int* desc, int* m, int* n, int* bm, int* bn, \n            int* rsrc, int* csrc, int* ctxt, int* lda, int* info);\n}\n\n// *****************************\n// OUR LAYOUT TRANSFORMER\n// *****************************\nlong int run_our_layout(int m, int n, int bm1, int bn1, int bm2, int bn2, int pm, int pn, int nrep, int rank) {\n    auto ordering = scalapack::ordering::row_major;\n\n    auto values = [](int i, int j) {\n        return cosma::math_utils::cantor_pairing(i, j);\n    };\n\n    scalapack::data_layout layout1({m, n}, {bm1, bn1}, {pm, pn}, ordering);\n    std::vector<double> buffer1 = initialize_locally(rank, layout1, values);\n    grid_layout scalapack_layout_1 = get_scalapack_grid(layout1, buffer1.data(), rank);\n\n    scalapack::data_layout layout2({m, n}, {bm2, bn2}, {pm, pn}, ordering);\n    std::vector<double> buffer2 = initialize_locally(rank, layout2, values);\n    grid_layout scalapack_layout_2 = get_scalapack_grid(layout2, buffer2.data(), rank);\n\n    long int min_time = std::numeric_limits<long int>::max();\n\n    for (int i = 0; i < nrep; ++i) {\n        
MPI_Barrier(MPI_COMM_WORLD);\n        auto start = std::chrono::steady_clock::now();\n\n        transform(scalapack_layout_1, scalapack_layout_2, MPI_COMM_WORLD);\n\n        MPI_Barrier(MPI_COMM_WORLD);\n        auto end = std::chrono::steady_clock::now();\n\n        auto our_time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n        min_time = std::min(our_time, min_time);\n    }\n\n    return min_time;\n}\n\n// *****************************\n// SCALAPACK LAYOUT TRANSFORMER\n// *****************************\nlong int run_scalapack_layout(int m, int n, int bm1, int bn1, int bm2, int bn2, int pm, int pn, int nrep, int rank) {\n    // Begin Cblas context\n    // We assume that we have 4 processes and place them in a 2-by-2 grid\n    int iZERO = 0;\n    int ctxt, myid, myrow, mycol, numproc;\n    int procrows = 2, proccols = 2;\n\n    Cblacs_pinfo(&myid, &numproc);\n    Cblacs_get(0, 0, &ctxt);\n    Cblacs_gridinit(&ctxt, \"Row-major\", procrows, proccols);\n    Cblacs_pcoord(ctxt, myid, &myrow, &mycol);\n\n    // Number of rows and cols owned by the current process\n    int nrows1 = numroc_(&m, &bm1, &myrow, &iZERO, &procrows);\n    int ncols1 = numroc_(&n, &bn1, &mycol, &iZERO, &proccols);\n\n    int nrows2 = numroc_(&m, &bm2, &myrow, &iZERO, &procrows);\n    int ncols2 = numroc_(&n, &bn2, &mycol, &iZERO, &proccols);\n\n    std::vector<double> buffer1(nrows1 * ncols1);\n    std::vector<double> buffer2(nrows2 * ncols2);\n\n    int ia = 1;\n    int ja = 1;\n    int ib = 1;\n    int jb = 1;\n\n    // std::vector<int> desca = {1, ctxt, m, n, bm1, bn1, 0, 0, m};\n    // std::vector<int> descb = {1, ctxt, m, n, bm2, bn2, 0, 0, m};\n\n    std::array<int, 9> desc1;\n    std::array<int, 9> desc2;\n    int info;\n    descinit_(&desc1[0], &m, &n, &bm1, &bn1, &iZERO, &iZERO, &ctxt, &nrows1, &info);\n    descinit_(&desc2[0], &m, &n, &bm2, &bn2, &iZERO, &iZERO, &ctxt, &nrows2, &info);\n\n    long int min_time = std::numeric_limits<long 
int>::max();\n    for (int i = 0; i < nrep; ++i) {\n        MPI_Barrier(MPI_COMM_WORLD);\n        auto start = std::chrono::steady_clock::now();\n        pdgemr2d_(&m, &n, buffer1.data(), &ia, &ib, &desc1[0],\n                buffer2.data(), &ib, &jb, &desc2[0],\n                &ctxt);\n\n        MPI_Barrier(MPI_COMM_WORLD);\n        auto end = std::chrono::steady_clock::now();\n        auto scalapack_time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();\n        min_time = std::min(min_time, scalapack_time);\n    }\n\n    // Release resources\n    Cblacs_gridexit(ctxt);\n\n    return min_time;\n}\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int dim = 10000;\n    int bm1 = 124;\n    int bn1 = 124;\n\n    int pm = 2;\n    int pn = 2;\n\n    int bm2 = 192;\n    int bn2 = 192;\n\n    int nrep = 3;\n\n    for (int i = 1; i <= 5; ++i) { \n        int m = dim * i;\n        int n = dim * i;\n\n        auto our_time = run_our_layout(m, n, bm1, bn1, bm2, bn2, pm, pn, nrep, rank);\n        auto scalapack_time = run_scalapack_layout(m, n, bm1, bn1, bm2, bn2, pm, pn, nrep, rank);\n\n        if (rank == 0) {\n            std::cout << \"Dimension = \" << m << std::endl;\n            std::cout << \"Our time [ms] = \" << our_time << std::endl;\n            std::cout << \"ScaLAPACK time [ms] = \" << scalapack_time << std::endl;\n            std::cout << \"Ratio scalapack/our = \" << 1.0 * scalapack_time/our_time << std::endl;\n            std::cout << \"============================\" << std::endl;\n        }\n    }\n\n    MPI_Finalize();\n    return 0;\n}\n\n"
  },
  {
    "path": "benchmarks/sendrecv.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/timer.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <cctype>\n#include <chrono>\n#include <cstdlib>\n#include <iostream>\n#include <string>\n#include <vector>\n\nusing namespace cosma;\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int base_size = 1 << 25;\n    int local_size = base_size;\n\n    std::vector<double> in(local_size);\n    std::vector<double> result(local_size);\n\n    const int n_rep = 10;\n    for (int i = 0; i < n_rep; ++i) {\n        int target = 1 - rank;\n        MPI_Sendrecv(in.data(),\n                     local_size,\n                     MPI_DOUBLE,\n                     target,\n                     0,\n                     result.data(),\n                     local_size,\n                     MPI_DOUBLE,\n                     target,\n                     0,\n                     MPI_COMM_WORLD,\n                     MPI_STATUS_IGNORE);\n    }\n\n    MPI_Finalize();\n    return 0;\n}\n"
  },
  {
    "path": "benchmarks/transpose.cpp",
    "content": "#include <costa/grid2grid/memory_utils.hpp>\n#include <costa/grid2grid/threads_workspace.hpp>\n#include <mkl.h>\n#include <chrono>\n#include <limits>\n\nint main(int argc, char** argv) {\n    int n_rep = 3;\n    // dimensions before transposing\n    std::vector<int> n_rows = {5000, 10000, 15000, 20000, 25000, 30000}; // 5000;\n    std::vector<int> n_cols = {5000, 10000, 15000, 20000, 25000, 30000}; // 10000;\n\n    // not strided\n    auto src_stride = n_rows; // 5000;\n    auto  dest_stride = n_cols; // 10000;\n    bool conjugate = false;\n\n    costa::memory::threads_workspace<double> workspace(256);\n\n    std::vector<long> g2g_times;\n    std::vector<long> mkl_times;\n\n    for (int i = 0; i < n_rows.size(); ++i) {\n        long g2g_time = std::numeric_limits<long>::max();\n        long mkl_time = std::numeric_limits<long>::max();\n\n        src_stride[i] = std::max(n_rows[i], src_stride[i]);\n        // since transposed\n        dest_stride[i] = std::max(n_cols[i], dest_stride[i]);\n\n        std::vector<double> src(src_stride[i] * n_cols[i]);\n        std::vector<double> dest_g2g(dest_stride[i] * n_rows[i]);\n        std::vector<double> dest_mkl(dest_stride[i] * n_rows[i]);\n\n        for (int row = 0; row < n_rows[i]; ++row) {\n            for (int col = 0; col < n_cols[i]; ++col) {\n                src[col * src_stride[i] + row] = col * src_stride[i] + row;\n            }\n        }\n\n        for (int rep = 0; rep < n_rep; ++rep) {\n            // ***********************************\n            // transpose with costa \n            // ***********************************\n            auto start = std::chrono::steady_clock::now();\n            costa::memory::copy_and_transpose<double>(src.data(), n_rows[i], n_cols[i], src_stride[i],\n                                                  dest_g2g.data(), dest_stride[i], false, workspace);\n            auto end = std::chrono::steady_clock::now();\n            g2g_time = std::min(g2g_time, (long) 
std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());\n\n            // ***********************************\n            // transpose with mkl\n            // ***********************************\n            start = std::chrono::steady_clock::now();\n            mkl_domatcopy('C', 'T', n_rows[i], n_cols[i], 1.0, src.data(), src_stride[i], dest_mkl.data(), dest_stride[i]);\n            end = std::chrono::steady_clock::now();\n            mkl_time = std::min(mkl_time, (long) std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());\n        }\n\n        g2g_times.push_back(g2g_time);\n        mkl_times.push_back(mkl_time);\n\n        // ***********************************\n        // checking results\n        // ***********************************\n        int n_rows_t = n_cols[i];\n        int n_cols_t = n_rows[i];\n        for (int row = 0; row < n_rows_t; ++row) {\n            for (int col = 0; col < n_cols_t; ++col) {\n                // dest_stride >= n_cols\n                auto g2g = dest_g2g[col * dest_stride[i] + row];\n                auto mkl = dest_mkl[col * dest_stride[i] + row];\n                auto target = src[row * src_stride[i] + col];\n                if (g2g != mkl) {\n                    std::cout << \"Error: (\" << col << \", \" << row << \") = \" << \", g2g = \" << g2g << \", mkl = \" << mkl << \", target = \" << target << std::endl;\n                }\n            }\n        }\n    }\n\n    // ***********************************\n    // output COSTA timings\n    // ***********************************\n    std::cout << \"COSTA times: \" << std::endl;\n    for (int i = 0; i < g2g_times.size(); ++i) {\n        std::cout << g2g_times[i] << \", \";\n    }\n    std::cout << std::endl;\n\n    // ***********************************\n    // output MKL timings\n    // ***********************************\n    std::cout << \"mkl times: \" << std::endl;\n    for (int i = 0; i < mkl_times.size(); ++i) {\n        
std::cout << mkl_times[i] << \", \";\n    }\n    std::cout << std::endl;\n\n    return 0;\n}\n\n\n\n\n"
  },
  {
    "path": "benchmarks/ubench-allgather.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/timer.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <cctype>\n#include <chrono>\n#include <cstdlib>\n#include <iostream>\n#include <string>\n#include <vector>\n\nusing namespace cosma;\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int base_size = 1500000;\n    int var = base_size / 10;\n    int local_size = base_size + ((rank % 2 == 0) ? var : 0);\n    int max_size = -1;\n    int total_size = 0;\n\n    std::vector<int> sizes(P);\n    std::vector<int> dspls(P);\n\n    for (int i = 0; i < P; ++i) {\n        int local_size = base_size + ((i % 2 == 0) ? var : 0);\n        max_size = std::max(max_size, local_size);\n        sizes[i] = local_size;\n        dspls[i] = total_size;\n        total_size += local_size;\n    }\n\n    std::vector<double> in(local_size);\n    std::vector<double> in_padded(max_size);\n\n    std::vector<double> result(total_size);\n    std::vector<double> result_padded(P * max_size);\n\n    const int n_rep = 30;\n\n    {\n        Timer time(n_rep, \"MPI_Allgatherv\");\n        for (int i = 0; i < n_rep; ++i) {\n            MPI_Allgatherv(in.data(),\n                           local_size,\n                           MPI_DOUBLE,\n                           result.data(),\n                           sizes.data(),\n                           dspls.data(),\n                           MPI_DOUBLE,\n                           MPI_COMM_WORLD);\n        }\n    }\n\n    {\n        Timer time(n_rep, \"MPI_Allgather\");\n        for (int i = 0; i < n_rep; ++i) {\n            MPI_Allgather(in_padded.data(),\n                          max_size,\n                          MPI_DOUBLE,\n                          result_padded.data(),\n                          max_size,\n                          MPI_DOUBLE,\n                          MPI_COMM_WORLD);\n        }\n 
   }\n\n    MPI_Finalize();\n    return 0;\n}\n"
  },
  {
    "path": "bors.toml",
    "content": "status = [\n  \"ci/gitlab/%\",\n]\ndelete_merged_branches = true\n"
  },
  {
    "path": "ci/baseimage.cuda.Dockerfile",
    "content": "FROM ubuntu:24.04 as builder\n\nARG CUDA_ARCH=90\n\nENV DEBIAN_FRONTEND noninteractive\n\nENV FORCE_UNSAFE_CONFIGURE 1\n\nENV PATH=\"/spack/bin:${PATH}\"\n\nENV MPICH_VERSION=4.3.2\nENV CMAKE_VERSION=3.30.9\n\nRUN apt-get -y update\n\nRUN apt-get install -y apt-utils\n\n# install basic tools\nRUN apt-get install -y --no-install-recommends gcc g++ gfortran clang libomp-14-dev git make unzip file \\\n  vim wget pkg-config python3-pip python3-dev cython3 python3-pythran tcl m4 cpio curl automake meson \\\n  xz-utils patch patchelf apt-transport-https ca-certificates gnupg software-properties-common perl tar bzip2 \\\n  liblzma-dev libbz2-dev\n\n# install CMake\nRUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-$(uname -m).tar.gz -O cmake.tar.gz && \\\n    tar zxvf cmake.tar.gz --strip-components=1 -C /usr\n\n# get latest version of spack\nRUN git clone -b releases/v1.1 https://github.com/spack/spack.git\n\n# set the location of packages built by spack\nRUN spack config add config:install_tree:root:/opt/local\n# set cuda_arch for all packages\nRUN spack config add packages:all:variants:cuda_arch=${CUDA_ARCH}\n\n# add local repo for cosma and tiled-mm\nCOPY ./spack_repo /spack_repo\nRUN spack repo add /spack_repo/cosma\n\n# find all external packages\nRUN spack external find --all --exclude python --exclude meson\n\n# find compilers\nRUN spack compiler find\n\n# install MPICH\nRUN spack install mpich@${MPICH_VERSION} %gcc\n\n# for the MPI hook\nRUN echo $(spack find --format='{prefix.lib}' mpich) > /etc/ld.so.conf.d/mpich.conf\nRUN ldconfig\n\n# # create environments for several configurations and install dependencies\nRUN spack env create -d /cosma-env-cuda && \\\n    spack -e /cosma-env-cuda add \"cosma@=master +cuda +tests +scalapack +shared %gcc  ^mpich\" && \\\n    spack -e /cosma-env-cuda add \"tiled-mm@=master\" && \\\n    spack -e /cosma-env-cuda develop -p \"./tiled-mm\" \"tiled-mm\" && 
\\\n    spack -e /cosma-env-cuda add \"costa@=master\" && \\\n    spack -e /cosma-env-cuda develop -p \"./costa\" \"costa\" && \\\n    spack -e /cosma-env-cuda develop -p /src cosma@master\nRUN spack -e /cosma-env-cuda install --only=dependencies --fail-fast\nRUN spack clean -a\n\n# RUN spack env create -d /cosma-env-cuda-gpu-direct && \\\n#     spack -e /cosma-env-cuda-gpu-direct add \"cosma@master +cuda +tests +scalapack +shared +gpu_direct %gcc  ^mpich \" && \\\n#     spack -e /cosma-env-cuda-gpu-direct add \"tiled-mm@master\" && \\\n#     spack -e /cosma-env-cuda-gpu-direct add \"costa@master\" && \\\n#     spack -e /cosma-env-cuda-gpu-direct add \"cuda@12\" && \\\n#     spack -e /cosma-env-cuda-gpu-direct develop -p /src cosma@master && \\\n#     spack -e /cosma-env-cuda-gpu-direct install --only=dependencies --fail-fast\n\n# RUN spack env create -d /cosma-env-cuda-nccl && \\\n#     spack -e /cosma-env-cuda-nccl add \"cosma@master +cuda +tests +scalapack +shared +nccl  %gcc ^mpich \" && \\\n#     spack -e /cosma-env-cuda-nccl add \"tiled-mm@2.3.1\" && \\\n#     spack -e /cosma-env-cuda-nccl add \"costa@master\" && \\\n#     spack -e /cosma-env-cuda-nccl add \"cuda@12\" && \\\n#     spack -e /cosma-env-cuda-nccl develop -p /src cosma@master && \\\n#     spack -e /cosma-env-cuda-nccl install --only=dependencies --fail-fast\n\n# RUN spack env create -d /cosma-env-cpu && \\\n#     spack -e /cosma-env-cpu add \"cosma@master ~cuda +tests +scalapack +shared %gcc  ^mpich \" && \\\n#     spack -e /cosma-env-cpu add \"costa@master\" && \\\n#     spack -e /cosma-env-cpu develop -p /src cosma@master && \\\n#     spack -e /cosma-env-cpu install --only=dependencies --fail-fast\n"
  },
  {
    "path": "ci/build.Dockerfile",
    "content": "ARG BASE_IMAGE\nFROM $BASE_IMAGE\n\nARG ENVPATH\n\n# copy source files of the pull request into container\nCOPY . /src\n\n# # show the spack's spec\nRUN spack -e $ENVPATH find -lcdv\n\n# build COSTA and Tiled-MM with current @master branch\nRUN cd $ENVPATH/costa && git pull && git log --oneline -1 && \\\n    cd $ENVPATH/tiled-mm && git pull && git log --oneline -1\n\n# show the spack.yaml\nRUN cat $ENVPATH/spack.yaml\n\n# build packages\nRUN spack -e $ENVPATH install\n\n# we need a fixed name for the build directory\n# here is a hacky workaround to link ./spack-build-{hash} to ./spack-build\nRUN cd /src && ln -s $(spack -e $ENVPATH location -b cosma) spack-build\n"
  },
  {
    "path": "ci/cscs.yml",
    "content": "include:\n  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'\n\nstages:\n  - baseimage\n  - build\n  - test\n\nbuild base image:\n  extends: [.dynamic-image-name, .container-builder-cscs-gh200]\n  stage: baseimage\n  timeout: 2h\n  variables:\n    DOCKERFILE: ci/baseimage.cuda.Dockerfile\n    WATCH_FILECHANGES: ci/baseimage.cuda.Dockerfile\n    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/cosma-ci\n    KUBERNETES_MEMORY_REQUEST: \"92Gi\"\n    KUBERNETES_MEMORY_LIMIT: \"92Gi\"\n\nbuild cosma:\n  extends: .container-builder-cscs-gh200\n  needs: [\"build base image\"]\n  stage: build\n  variables:\n    CSCS_REBUILD_POLICY: \"always\"\n    DOCKERFILE: ci/build.Dockerfile\n    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/cosma/cosma-ci:$CI_COMMIT_SHA\n    ENVPATH: \"/cosma-env-cuda\"\n    DOCKER_BUILD_ARGS: '[\"BASE_IMAGE=${BASE_IMAGE}\", \"ENVPATH=$ENVPATH\"]'\n\n.run_tests:\n  extends: [.container-runner-daint-gh200]\n  needs: [\"build cosma\"]\n  stage: test\n  image: $CSCS_REGISTRY_PATH/cosma/cosma-ci:$CI_COMMIT_SHA\n  variables:\n    GIT_STRATEGY: none\n    MPICH_MAX_THREAD_SAFETY: multiple\n    CSCS_REGISTRY_LOGIN: 'YES'\n    PULL_IMAGE: 'YES'\n    SLURM_HINT: nomultithread\n    SLURM_UNBUFFEREDIO: ''\n    SLURM_CPU_BIND: 'socket'\n    SLURM_MPI: \"pmi2\"\n    CRAY_CUDA_MPS: 'YES'\n    # SLURM_WAIT: 0\n    COSMA_GPU_MAX_TILE_K: 100\n    COSMA_GPU_MAX_TILE_M: 100\n    COSMA_GPU_MAX_TILE_N: 100\n\nmapper:\n  extends: .run_tests\n  stage: test\n  script: /cosma-env-cuda/.spack-env/view/bin/test.mapper\n  variables:\n    SLURM_JOB_NUM_NODES: 1\n    SLURM_NTASKS: 1\n    USE_MPI: 'YES'\n\npdgemm:\n  extends: .run_tests\n  stage: test\n  script: /cosma-env-cuda/.spack-env/view/bin/test.pdgemm\n  variables:\n    SLURM_JOB_NUM_NODES: 2\n    SLURM_NTASKS: 16\n    USE_MPI: 'YES'\n\nmultiply:\n  extends: .run_tests\n  stage: test\n  script: /cosma-env-cuda/.spack-env/view/bin/test.multiply\n  variables:\n    SLURM_JOB_NUM_NODES: 
2\n    SLURM_NTASKS: 16\n    USE_MPI: 'YES'\n\nscalar_matmul:\n  extends: .run_tests\n  stage: test\n  script: /cosma-env-cuda/.spack-env/view/bin/test.scalar_matmul\n  variables:\n    SLURM_JOB_NUM_NODES: 1\n    SLURM_NTASKS: 8\n    USE_MPI: 'YES'\n\nmultiply_using_layout:\n  extends: .run_tests\n  stage: test\n  script: /cosma-env-cuda/.spack-env/view/bin/test.multiply_using_layout\n  variables:\n    SLURM_JOB_NUM_NODES: 1\n    SLURM_NTASKS: 4\n"
  },
  {
    "path": "ci/mps-wrapper.sh",
    "content": "#!/bin/bash\n# Example mps-wrapper.sh usage:\n# > srun --cpu-bind=socket [...] mps-wrapper.sh <cmd>\n\nexport CUDA_MPS_PIPE_DIRECTORY=/tmp/nvidia-mps\nexport CUDA_MPS_LOG_DIRECTORY=/tmp/nvidia-log\n# Launch MPS from a single rank per node\nif [ $SLURM_LOCALID -eq 0 ]; then\n    CUDA_VISIBLE_DEVICES=0,1,2,3 nvidia-cuda-mps-control -d\nfi\n\n# set cuda device\nnuma_nodes=$(hwloc-calc --physical --intersect NUMAnode $(taskset -p $$ | awk '{print \"0x\"$6}'))\nexport CUDA_VISIBLE_DEVICES=$numa_nodes\n# Run the command\nexec numactl --membind=$numa_nodes \"$@\"\n"
  },
  {
    "path": "cmake/FindARMPL.cmake",
    "content": "# Copyright (c) 2022- ETH Zurich\n#\n# authors : Mathieu Taillefumier\n\ninclude(FindPackageHandleStandardArgs)\n\nset(_ARMPL_PATHS ${ARMPL_ROOT}\n  $ENV{ARMPL_ROOT}\n  $ENV{ARMPLROOT}\n  $ENV{ARMPL_DIR}\n  $ENV{ARMPLDIR}\n  $ENV{ORNL_ARMPL_ROOT}\n  $ENV{CRAY_ARMPL_ROOT})\n\nforeach(_var armpl armpl_int64 armpl_ilp64 armpl_lp64 armpl_ilp64_mp armpl_lp64_mp)\n  string(TOUPPER ${_var} _var_up)\n  find_library(\"COSMA_${_var_up}_LINK_LIBRARIES\" NAME ${_var} HINTS ${_ARMPL_PATHS} PATH_SUFFIXES \"lib\" \"lib64\" \"armpl/lib\" \"armpl/lib64\" \"armpl\")\nendforeach()\n\nfind_path(COSMA_ARMPL_INCLUDE_DIRS NAMES \"armpl.h\" HINTS ${_ARMPL_PATHS} PATH_SUFFIXES \"include\" \"armpl\" \"armpl/include\" \"include/armpl\")\n\n# Check for 64bit Integer support\nif(COSMA_BLAS_INTERFACE MATCHES \"64bits\")\n  set(COSMA_BLAS_armpl_LIB \"ARMPL_ILP64\")\nelse()\n  set(COSMA_BLAS_armpl_LIB \"ARMPL_LP64\")\nendif()\n\n# Check for OpenMP support, VIA BLAS_VENDOR of Arm_mp or Arm_ipl64_mp\nif(COSMA_BLAS_THREADING MATCHES \"openmp\")\n  string(APPEND COSMA_BLAS_armpl_LIB \"_MP\")\nendif()\n\n# check if found\nfind_package_handle_standard_args(\n  Armpl REQUIRED_VARS COSMA_ARMPL_INCLUDE_DIRS COSMA_ARMPL_LP64_LINK_LIBRARIES\n  COSMA_ARMPL_LP64_MP_LINK_LIBRARIES COSMA_ARMPL_ILP64_LINK_LIBRARIES COSMA_ARMPL_ILP64_MP_LINK_LIBRARIES)\n\n# add target to link against\nif (NOT TARGET cosma::BLAS::ARMPL::armpl)\n  add_library(cosma::BLAS::ARMPL::armpl INTERFACE IMPORTED)\n  # now define an alias to the target library\n  add_library(cosma::BLAS::ARMPL::blas ALIAS cosma::BLAS::ARMPL::armpl)\nendif()\n\n# we need to iniitialize the targets of each individual libraries only once.\nforeach(_var armpl_ilp64 armpl_lp64 armpl_ilp64_mp armpl_lp64_mp)\n  string(TOUPPER \"${_var}\" _var_up)\n  if (NOT TARGET cosma::BLAS::ARMPL::${_var})\n    add_library(cosma::BLAS::ARMPL::${_var} INTERFACE IMPORTED)\n    set_property(TARGET cosma::BLAS::ARMPL::${_var} PROPERTY INTERFACE_INCLUDE_DIRECTORIES\n  
    ${COSMA_ARMPL_INCLUDE_DIRS})\n    set_property(TARGET cosma::BLAS::ARMPL::${_var} PROPERTY INTERFACE_LINK_LIBRARIES\n      \"${COSMA_${_var_up}_LINK_LIBRARIES}\")\n  endif()\nendforeach()\n\nset_property(TARGET cosma::BLAS::ARMPL::armpl PROPERTY INTERFACE_INCLUDE_DIRECTORIES\n  ${COSMA_ARMPL_INCLUDE_DIRS})\nset_property(TARGET cosma::BLAS::ARMPL::armpl PROPERTY INTERFACE_LINK_LIBRARIES\n  \"${COSMA_${COSMA_BLAS_armpl_LIB}_LINK_LIBRARIES}\")\n\nset(COSMA_BLAS_VENDOR \"ARMPL\")\n\nmark_as_advanced(COSMA_ARMPL_FOUND COSMA_BLAS_VENDOR COSMA_ARMPL_INCLUDE_DIRS)\n"
  },
  {
    "path": "cmake/FindATLAS.cmake",
    "content": "#  Copyright (c) 2019 ETH Zurich\n#\n#  Redistribution and use in source and binary forms, with or without\n#  modification, are permitted provided that the following conditions are met:\n#\n#  1. Redistributions of source code must retain the above copyright notice,\n#     this list of conditions and the following disclaimer.\n#  2. Redistributions in binary form must reproduce the above copyright\n#     notice, this list of conditions and the following disclaimer in the\n#     documentation and/or other materials provided with the distribution.\n#  3. Neither the name of the copyright holder nor the names of its contributors\n#     may be used to endorse or promote products derived from this software\n#     without specific prior written permission.\n#\n#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n#  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n#  POSSIBILITY OF SUCH DAMAGE.\n\n\n#.rst:\n# FindATLAS\n# -----------\n#\n# This module tries to find the ATLAS library.\n#\n# The following variables are set\n#\n# ::\n#\n#   ATLAS_FOUND           - True if atlas is found\n#   ATLAS_LIBRARIES       - The required libraries\n#   ATLAS_INCLUDE_DIRS    - The required include directory\n#\n# The following import target is created\n#\n# ::\n#\n#   ATLAS::atlas\n\n#set paths to look for library from ROOT variables.If new policy is set, find_library() automatically uses them.\n# if(NOT POLICY CMP0074)\nset(_ATLAS_PATHS ${ATLAS_ROOT}\n                 $ENV{ATLAS_ROOT}\n                 $ENV{ATLASROOT}\n                 $ENV{ATLAS_DIR}\n                 $ENV{ATLASDIR})\n# endif()\n\nfind_library(\n    COSMA_ATLAS_LINK_LIBRARIES\n    NAMES \"atlas\"\n    HINTS ${_ATLAS_PATHS}\n    PATH_SUFFIXES \"atlas/lib\" \"atlas/lib64\" \"atlas\"\n)\nfind_path(\n    COSMA_ATLAS_INCLUDE_DIRS\n    NAMES \"cblas-atlas.h\" \"cblas_atlas.h\" \"cblas.h\" \n    HINTS ${_ATLAS_PATHS}\n    PATH_SUFFIXES \"atlas\" \"atlas/include\" \"include/atlas\"\n)\n\n# check if found\ninclude(FindPackageHandleStandardArgs)\nfind_package_handle_standard_args(ATLAS REQUIRED_VARS COSMA_ATLAS_INCLUDE_DIRS COSMA_ATLAS_LINK_LIBRARIES)\n\n# add target to link against\nif(NOT TARGET cosma::BLAS::ATLAS::atlas)\n  add_library(cosma::BLAS::ATLAS::atlas INTERFACE IMPORTED)\n  add_library(cosma::BLAS::ATLAS::blas ALIAS 
cosma::BLAS::ATLAS::atlas)\nendif()\nset_property(TARGET cosma::BLAS::ATLAS::atlas PROPERTY INTERFACE_LINK_LIBRARIES ${COSMA_ATLAS_LINK_LIBRARIES})\nset_property(TARGET cosma::BLAS::ATLAS::atlas PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${COSMA_ATLAS_INCLUDE_DIRS})\n\n# prevent clutter in cache\nMARK_AS_ADVANCED(ATLAS_FOUND ATLAS_LIBRARIES ATLAS_INCLUDE_DIRS)\n"
  },
  {
    "path": "cmake/FindBLIS.cmake",
    "content": "#  Copyright (c) 2019 ETH Zurich\n#\n#  Redistribution and use in source and binary forms, with or without\n#  modification, are permitted provided that the following conditions are met:\n#\n#  1. Redistributions of source code must retain the above copyright notice,\n#     this list of conditions and the following disclaimer.\n#  2. Redistributions in binary form must reproduce the above copyright\n#     notice, this list of conditions and the following disclaimer in the\n#     documentation and/or other materials provided with the distribution.\n#  3. Neither the name of the copyright holder nor the names of its contributors\n#     may be used to endorse or promote products derived from this software\n#     without specific prior written permission.\n#\n#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n#  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n#  POSSIBILITY OF SUCH DAMAGE.\n\n\n#.rst:\n# FindBLIS\n# -----------\n#\n# This module tries to find the BLIS library.\n#\n# The following variables are set\n#\n# ::\n#\n#   BLIS_FOUND           - True if blis is found\n#   BLIS_LIBRARIES       - The required libraries\n#   BLIS_INCLUDE_DIRS    - The required include directory\n#\n# The following import target is created\n#\n# ::\n#\n#   BLIS::blis\n\n#set paths to look for library from ROOT variables.If new policy is set, find_library() automatically uses them.\n# if(NOT POLICY CMP0074)\nset(_BLIS_PATHS ${BLIS_ROOT} \n                $ENV{BLIS_ROOT} \n                $ENV{BLISROOT}\n                $ENV{BLIS_DIR}\n                $ENV{BLISDIR})\n# endif()\n\nfind_library(\n    COSMA_BLIS_LINK_LIBRARIES\n    NAMES \"blis\"\n    HINTS ${_BLIS_PATHS}\n    PATH_SUFFIXES \"lib\" \"lib64\" \"blis/lib\" \"blis/lib64\" \"blis\"\n)\nfind_path(\n    COSMA_BLIS_INCLUDE_DIRS\n    NAMES \"blis.h\"\n    HINTS ${_BLIS_PATHS}\n    PATH_SUFFIXES \"include\" \"blis\" \"blis/include\" \"include/blis\"\n)\nfind_path(\n    COSMA_BLIS_CBLAS_INCLUDE_DIRS\n    NAMES \"cblas_blis.h\" \"cblas-blis.h\" \"cblas.h\" \n    HINTS ${_BLIS_PATHS}\n    PATH_SUFFIXES \"include\" \"blis\" \"blis/include\" \"include/blis\"\n)\n\n# check if found\ninclude(FindPackageHandleStandardArgs)\nfind_package_handle_standard_args(BLIS REQUIRED_VARS COSMA_BLIS_INCLUDE_DIRS COSMA_BLIS_LINK_LIBRARIES COSMA_BLIS_CBLAS_INCLUDE_DIRS)\n\n# add target to link 
against\nif(NOT TARGET cosma::BLAS::BLIS::blis)\n  add_library(cosma::BLAS::BLIS::blis INTERFACE IMPORTED)\n  add_library(cosma::BLAS::BLIS::blas ALIAS cosma::BLAS::BLIS::blis)\nendif()\nset_property(TARGET cosma::BLAS::BLIS::blis PROPERTY INTERFACE_LINK_LIBRARIES ${COSMA_BLIS_LINK_LIBRARIES})\nset_property(TARGET cosma::BLAS::BLIS::blis PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${COSMA_BLIS_INCLUDE_DIRS} ${COSMA_BLIS_CBLAS_INCLUDE_DIRS})\n\n# prevent clutter in cache\nMARK_AS_ADVANCED(BLIS_FOUND COSMA_BLIS_LINK_LIBRARIES COSMA_BLIS_INCLUDE_DIRS COSMA_BLIS_CBLAS_INCLUDE_DIRS)\n"
  },
  {
    "path": "cmake/FindBlas.cmake",
    "content": "# Copyright (c) 2022- ETH Zurich\n#\n# authors : Mathieu Taillefumier\n\ninclude(FindPackageHandleStandardArgs)\n\nif(NOT\n   (CMAKE_C_COMPILER_LOADED\n    OR CMAKE_CXX_COMPILER_LOADED\n    OR CMAKE_Fortran_COMPILER_LOADED))\n  message(FATAL_ERROR \"FindBLAS requires Fortran, C, or C++ to be enabled.\")\nendif()\n\nset(COSMA_BLAS_VENDOR_LIST\n  \"auto\"\n  \"MKL\"\n  \"OPENBLAS\"\n  \"FLEXIBLAS\"\n  \"ARMPL\"\n  \"GenericBLAS\"\n  \"CRAY_LIBSCI\"\n  \"BLIS\"\n  \"ATLAS\"\n  \"NVPL\"\n  \"OFF\")\n\n# COSMA_BLAS_VENDOR should normally be defined here but cosma defines it in the\n# main CMakeLists.txt to keep the old behavior. the threading and integer\n# interface can also be controlled but are fixed to the default values that\n# COSMA was configured before introducing this module. So if findBLAS.cmake is\n# to be used elsewhere, it is better to look at what CP2K does and start from\n# there\n\nif(NOT ${COSMA_BLAS_VENDOR} IN_LIST COSMA_BLAS_VENDOR_LIST)\n  message(FATAL_ERROR \"Invalid Host BLAS backend\")\nendif()\n\nset(COSMA_BLAS_THREAD_LIST \"sequential\" \"thread\" \"gnu-thread\" \"intel-thread\"\n  \"tbb-thread\" \"openmp\")\n\nset(COSMA_BLAS_THREADING\n  \"openmp\"\n  CACHE STRING \"threaded blas library\")\nset_property(CACHE COSMA_BLAS_THREADING PROPERTY STRINGS\n  ${COSMA_BLAS_THREAD_LIST})\n\nif(NOT ${COSMA_BLAS_THREADING} IN_LIST COSMA_BLAS_THREAD_LIST)\n  message(FATAL_ERROR \"Invalid threaded BLAS backend\")\nendif()\n\nset(COSMA_BLAS_INTERFACE_BITS_LIST \"32bits\" \"64bits\")\nset(COSMA_BLAS_INTERFACE\n  \"32bits\"\n  CACHE STRING\n  \"32 bits integers are used for indices, matrices and vectors sizes\")\nset_property(CACHE COSMA_BLAS_INTERFACE\n  PROPERTY STRINGS ${COSMA_BLAS_INTERFACE_BITS_LIST})\n\nif(NOT ${COSMA_BLAS_INTERFACE} IN_LIST COSMA_BLAS_INTERFACE_BITS_LIST)\n  message(\n    FATAL_ERROR\n    \"Invalid parameters. 
Blas and lapack can exist in two flavors 32 or 64 bits interfaces (relevant mostly for mkl)\"\n  )\nendif()\n\nif (COSMA_BLAS_VENDOR MATCHES \"OFF\")\n   return ()\nendif()\n\nset(COSMA_BLAS_FOUND FALSE)\n\n# first check for a specific implementation if requested\n\nif(NOT COSMA_BLAS_VENDOR MATCHES \"auto\")\n   if (COSMA_BLAS_VENDOR MATCHES \"CUSTOM\")\n       find_package(GenericBLAS REQUIRED)\n   else()\n       find_package(${COSMA_BLAS_VENDOR} REQUIRED)\n  endif()\n  if(TARGET cosma::BLAS::${COSMA_BLAS_VENDOR}::blas)\n    get_target_property(COSMA_BLAS_INCLUDE_DIRS cosma::BLAS::${COSMA_BLAS_VENDOR}::blas\n                        INTERFACE_INCLUDE_DIRECTORIES)\n    get_target_property(COSMA_BLAS_LINK_LIBRARIES cosma::BLAS::${COSMA_BLAS_VENDOR}::blas\n                        INTERFACE_LINK_LIBRARIES)\n    set(COSMA_BLAS_FOUND TRUE)\n  endif()\nelse()\n  # search for any blas implementation and exit imediately if one is found\n  foreach(_libs ${COSMA_BLAS_VENDOR_LIST})\n    # i exclude the first item of the list\n    if (NOT _libs STREQUAL \"auto\")\n      find_package(${_libs})\n      if(TARGET cosma::BLAS::${_libs}::blas)\n        get_target_property(COSMA_BLAS_INCLUDE_DIRS cosma::BLAS::${_libs}::blas\n          INTERFACE_INCLUDE_DIRECTORIES)\n        get_target_property(COSMA_BLAS_LINK_LIBRARIES cosma::BLAS::${_libs}::blas\n          INTERFACE_LINK_LIBRARIES)\n        set(COSMA_BLAS_VENDOR \"${_libs}\")\n        set(COSMA_BLAS_FOUND TRUE)\n        break()\n      endif()\n    endif()\n  endforeach()\nendif()\n\nif(COSMA_BLAS_INCLUDE_DIRS)\n  find_package_handle_standard_args(\n    Blas REQUIRED_VARS COSMA_BLAS_LINK_LIBRARIES COSMA_BLAS_INCLUDE_DIRS\n                       COSMA_BLAS_VENDOR)\nelse()\n  message(WARNING \"Blas REQUIRED_VARS '${COSMA_BLAS_LINK_LIBRARIES}' '${COSMA_BLAS_VENDOR}'\")\n  find_package_handle_standard_args(\n    Blas REQUIRED_VARS COSMA_BLAS_LINK_LIBRARIES COSMA_BLAS_VENDOR)\nendif()\n\nif(NOT TARGET cosma::BLAS::blas)\n  
add_library(cosma::BLAS::blas INTERFACE IMPORTED)\nendif()\n\nset_target_properties(cosma::BLAS::blas PROPERTIES INTERFACE_LINK_LIBRARIES\n  \"${COSMA_BLAS_LINK_LIBRARIES}\")\n\nif(COSMA_BLAS_INCLUDE_DIRS)\n  set_target_properties(cosma::BLAS::blas PROPERTIES INTERFACE_INCLUDE_DIRECTORIES\n    \"${COSMA_BLAS_INCLUDE_DIRS}\")\nendif()\n\nmark_as_advanced(COSMA_BLAS_INCLUDE_DIRS)\nmark_as_advanced(COSMA_BLAS_LINK_LIBRARIES)\nmark_as_advanced(COSMA_BLAS_VENDOR)\nmark_as_advanced(COSMA_BLAS_FOUND)\n"
  },
  {
    "path": "cmake/FindCRAY_LIBSCI.cmake",
    "content": "include(FindPackageHandleStandardArgs)\n\n# we are using the GNU compiler\nset(_sciname \"sci_gnu_mpi_mp\")\nset(_sciname_acc \"sci_acc_gnu_nv60\")\n\nfind_library(COSMA_CRAY_LIBSCI_LIBRARIES\n    NAMES ${_sciname_acc} ${_sciname}\n    HINTS\n    ${_SCALAPACK_LIBRARY_DIRS}\n    ENV CRAY_LIBSCI_PREFIX_DIR\n    ENV CRAY_PE_LIBSCI_PREFIX_DIR\n    ENV CRAY_LIBSCI_ACC_PREFIX_DIR\n    ENV CRAY_PE_LIBSCI_ACC_PREFIX_DIR\n    PATH_SUFFIXES lib\n    DOC \"Path to the Cray-libsci library.\")\n\nmessage(\"CRAY_LIBSCI: ${COSMA_CRAY_LIBSCI_LIBRARIES}\")\n\nfind_package_handle_standard_args(CRAY_LIBSCI DEFAULT_MSG COSMA_CRAY_LIBSCI_LIBRARIES)\n\nif (NOT TARGET cosma::BLAS::CRAY_LIBSCI::sci)\n\tadd_library(cosma::BLAS::CRAY_LIBSCI::sci INTERFACE IMPORTED)\n\tset_target_properties(cosma::BLAS::CRAY_LIBSCI::sci PROPERTIES INTERFACE_LINK_LIBRARIES \"${COSMA_CRAY_LIBSCI_LIBRARIES}\")\n\tadd_library(cosma::BLAS::CRAY_LIBSCI::blas ALIAS cosma::BLAS::CRAY_LIBSCI::sci)\n\n\tadd_library(cosma::BLAS::CRAY_LIBSCI::scalapack_link INTERFACE IMPORTED)\n\tset_target_properties(cosma::BLAS::CRAY_LIBSCI::scalapack_link PROPERTIES INTERFACE_LINK_LIBRARIES \"${COSMA_CRAY_LIBSCI_LIBRARIES}\")\nendif()\n"
  },
  {
    "path": "cmake/FindFLEXIBLAS.cmake",
    "content": "# Copyright (c) 2022- ETH Zurich\n#\n# authors : Mathieu Taillefumier\n\ninclude(FindPackageHandleStandardArgs)\n\nset(_FLEXIBLAS_PATHS ${FLEXIBLAS_ROOT}\n  $ENV{FLEXIBLAS_ROOT}\n  $ENV{FLEXIBLASROOT}\n  $ENV{FLEXIBLAS_DIR}\n  $ENV{FLEXIBLASDIR}\n  $ENV{ORNL_FLEXIBLAS_ROOT}\n  $ENV{CRAY_FLEXIBLAS_ROOT})\n\n# try first with pkg-config\nfind_package(PkgConfig QUIET)\n\nif(PKG_CONFIG_FOUND)\n  pkg_check_modules(COSMA_FLEXIBLAS IMPORTED_TARGET GLOBAL flexiblas)\nendif()\n\nfind_package_handle_standard_args(\n  FLEXIBLAS DEFAULT_MSG COSMA_FLEXIBLAS_INCLUDE_DIRS\n  COSMA_FLEXIBLAS_LINK_LIBRARIES)\n\nif(COSMA_FLEXIBLAS_FOUND)\n  set(COSMA_BLAS_VENDOR \"FlexiBLAS\")\n  \n  if(NOT TARGET cosma::BLAS::FLEXIBLAS::flexiblas)\n    add_library(cosma::BLAS::FLEXIBLAS::flexiblas INTERFACE IMPORTED)\n    add_library(cosma::BLAS::FLEXIBLAS::blas ALIAS cosma::BLAS::FLEXIBLAS::flexiblas)\n  endif()\n  set_target_properties(\n    cosma::BLAS::FLEXIBLAS::flexiblas PROPERTIES INTERFACE_LINK_LIBRARIES\n    \"${COSMA_FLEXIBLAS_LINK_LIBRARIES}\")\n  if(COSMA_FLEXIBLAS_INCLUDE_DIRS)\n    set_target_properties(\n      cosma::BLAS::FLEXIBLAS::flexiblas PROPERTIES INTERFACE_INCLUDE_DIRECTORIES\n      \"${COSMA_FLEXIBLAS_INCLUDE_DIRS}\")\n  endif()\nendif()\n\nmark_as_advanced(COSMA_FLEXIBLAS_FOUND COSMA_FLEXIBLAS_INCLUDE_DIRS\n                 COSMA_FLEXIBLAS_LINK_LIBRARIES COSMA_BLAS_VENDOR)\n"
  },
  {
    "path": "cmake/FindGenericBLAS.cmake",
    "content": "# Copyright (c) 2022- ETH Zurich\n#\n# authors : Mathieu Taillefumier\ninclude(FindPackageHandleStandardArgs)\n\nif(NOT POLICY CMP0074)\n  set(_GenericBLAS_PATHS ${GenericBLAS_ROOT} $ENV{GenericBLAS_ROOT})\nendif()\n\nfind_library(\n  COSMA_GenericBLAS_LINK_LIBRARIES\n  NAMES \"blas\"\n  HINTS ${_GenericBLAS_PATHS})\nfind_library(\n  # optionally look for cblas library - not required\n  COSMA_GenericBLAS_CBLAS_LIBRARIES\n  NAMES \"cblas\"\n  HINTS ${_GenericBLAS_PATHS})\nfind_path(\n  COSMA_GenericBLAS_INCLUDE_DIRS\n  NAMES \"cblas.h\"\n  HINTS ${_GenericBLAS_PATHS})\n\n# check if found\nif(COSMA_GenericBLAS_INCLUDE_DIRS)\n  find_package_handle_standard_args(\n    GenericBLAS REQUIRED_VARS COSMA_GenericBLAS_INCLUDE_DIRS COSMA_GenericBLAS_LINK_LIBRARIES)\nelse()\n  find_package_handle_standard_args(GenericBLAS\n                                    REQUIRED_VARS COSMA_GenericBLAS_LINK_LIBRARIES)\nendif()\n\nif(COSMA_GenericBLAS_CBLAS_LIBRARIES)\n  list(APPEND COSMA_GenericBLAS_LINK_LIBRARIES ${COSMA_GenericBLAS_CBLAS_LIBRARIES})\nendif()\n\n# add target to link against\nif(NOT TARGET cosma::BLAS::GenericBLAS::blas)\n  add_library(cosma::BLAS::GenericBLAS::blas INTERFACE IMPORTED)\nendif()\nset_property(TARGET cosma::BLAS::GenericBLAS::blas PROPERTY INTERFACE_LINK_LIBRARIES\n  ${COSMA_GenericBLAS_LINK_LIBRARIES})\nset_property(\n  TARGET cosma::BLAS::GenericBLAS::blas PROPERTY INTERFACE_INCLUDE_DIRECTORIES\n  ${COSMA_GenericBLAS_INCLUDE_DIRS})\n\n# prevent clutter in cache\nmark_as_advanced(COSMA_GenericBLAS_FOUND COSMA_GenericBLAS_LINK_LIBRARIES\n                 COSMA_GenericBLAS_INCLUDE_DIRS COSMA_GenericBLAS_CBLAS_LIBRARIES)\n"
  },
  {
    "path": "cmake/FindMKL.cmake",
    "content": "#\n# CMake recipes https://github.com/eth-cscs/cmake-recipes\n#\n# Copyright (c) 2018-2019, ETH Zurich BSD 3-Clause License. All rights reserved.\n#\n# Author: Teodor Nikolov (teodor.nikolov22@gmail.com)\n#\n#[=======================================================================[.rst:\nFindMKL\n-------\n\nThe following conventions are used:\n\nintel / INTEL  - Bindings for everything except GNU Fortran\ngf / GF        - GNU Fortran bindings\nseq / SEQ      - sequential MKL\nomp / OMP      - threaded MKL with OpenMP back end\ntbb / TBB      - threaded MKL with TBB back end\n32bit / 32BIT  - MKL 32 bit integer interface (used most often)\n64bit / 64BIT  - MKL 64 bit integer interface\nmpich / MPICH  - MPICH / IntelMPI BLACS back end\nompi / OMPI    - OpenMPI BLACS back end\nst / ST        - static libraries\ndyn / DYN      - dynamic libraries\n\nThe module attempts to define a target for each MKL configuration. The\nconfiguration will not be available if there are missing library files or a\nmissing dependency.\n\nMKL Link line advisor:\nhttps://software.intel.com/en-us/articles/intel-mkl-link-line-advisor\n\nNote: Mixing GCC and Intel OpenMP backends is a bad idea.\n\nSearch variables\n^^^^^^^^^^^^^^^^\n\n``MKLROOT``\nEnvironment variable set to MKL's root directory\n\n``MKL_ROOT``\nCMake variable set to MKL's root directory\n\nExample usage\n^^^^^^^^^^^^^\n\nTo Find MKL:\n\nfind_package(MKL REQUIRED)\n\nTo check if target is available:\n\nif (TARGET MKL::scalapack_mpich_intel_32bit_omp_dyn)\n  ...\nendif()\n\nTo link to an available target (see list below):\n\ntarget_link_libraries(... 
MKL::scalapack_mpich_intel_32bit_omp_dyn)\n\nNote: dependencies are handled for you (MPI, OpenMP, ...)\n\nthe target MKL::blas, MKL::MKL, MKL::lapack also include all necessary libraries\nfor linking.\nMKL::MKL is also used by the cmake module provided by intel.\n\nMKL::scalapack_link gives all libraries needed for scalapack.\n\nImported targets\n^^^^^^^^^^^^^^^^\n\nMKL (BLAS, LAPACK, FFT) targets:\n\nMKL::[gf|intel]_[32bit|64bit]_[seq|omp|tbb]_[st|dyn] e.g.\n\nMKL::mkl_intel_32bit_omp_dyn\n\nBLACS targets:\n\nMKL::blacs_[mpich|ompi]_[gf|intel]_[32bit|64bit]_[seq|omp|tbb]_[st|dyn] e.g.\n\nMKL::blacs_intel_mpich_32bit_seq_st\n\n\nScaLAPACK targets:\n\nMKL::scalapack_[mpich|ompi]_[gf|intel]_[32bit|64bit]_[seq|omp|tbb]_[st|dyn] e.g.\n\nMKL::scalapack_mpich_intel_64bit_omp_dyn\n\nResult variables\n^^^^^^^^^^^^^^^^\n\nMKL_FOUND\n\nNot supported\n^^^^^^^^^^^^^\n\n- F95 interfaces\n\n#]=======================================================================]\n\n# Copyright (c) 2022- ETH Zurich\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n#    list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n#    this list of conditions and the following disclaimer in the documentation\n#    and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of its contributors\n#    may be used to endorse or promote products derived from this software\n#    without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\ninclude(FindPackageHandleStandardArgs)\n\nif(NOT\n   (CMAKE_C_COMPILER_LOADED\n    OR CMAKE_CXX_COMPILER_LOADED\n    OR CMAKE_Fortran_COMPILER_LOADED))\n  message(FATAL_ERROR \"FindMKL requires Fortran, C, or C++ to be enabled.\")\nendif()\n\n# Dependencies\n#\nenable_language(Fortran)\nenable_language(C)\nfind_package(Threads)\nfind_package(MPI COMPONENTS CXX C Fortran)\nfind_package(OpenMP COMPONENTS CXX C Fortran)\n\n# If MKL_ROOT is not set, set it via the env variable MKLROOT.\n#\nif(NOT DEFINED MKL_ROOT)\n  set(MKL_ROOT\n      $ENV{MKLROOT}\n      CACHE PATH \"MKL's root directory.\")\nendif()\n\n# Determine MKL's library folder\n#\nset(_mkl_libpath_suffix \"intel64\")\nif(CMAKE_SIZEOF_VOID_P EQUAL 4) # 32 bit\n  set(_mkl_libpath_suffix \"ia32\")\nendif()\n\nif(WIN32)\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  string(APPEND _mkl_libpath_suffix \"_win\")\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  set(_mkl_libname_prefix \"\")\n  set(_mkl_shared_lib \"_dll.lib\")\n  set(_mkl_static_lib \".lib\")\nelseif(APPLE)\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  string(APPEND _mkl_libpath_suffix \"_mac\")\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  set(_mkl_libname_prefix \"lib\")\n  set(_mkl_shared_lib \".dylib\")\n  set(_mkl_static_lib \".a\")\nelse() # LINUX\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  string(APPEND 
_mkl_libpath_suffix \"_lin\")\n  list(APPEND _mkl_libpath_suffix_list ${_mkl_libpath_suffix})\n  set(_mkl_libname_prefix \"lib\")\n  set(_mkl_shared_lib \".so\")\n  set(_mkl_static_lib \".a\")\nendif()\nset(_mkl_search_paths \"${MKL_ROOT}\" \"${MKL_ROOT}/lib\" \"${MKL_ROOT}/mkl/lib\"\n                      \"${MKL_ROOT}/compiler/lib\")\n\n# Functions: finds both static and shared MKL libraries\n#\nfunction(__mkl_find_library _varname _libname)\n  find_library(\n    ${_varname}_DYN\n    NAMES ${_mkl_libname_prefix}${_libname}${_mkl_shared_lib}\n    HINTS ${_mkl_search_paths}\n    PATH_SUFFIXES ${_mkl_libpath_suffix_list})\n  mark_as_advanced(${_varname}_DYN)\n  find_library(\n    ${_varname}_ST\n    NAMES ${_mkl_libname_prefix}${_libname}${_mkl_static_lib}\n    HINTS ${_mkl_search_paths}\n    PATH_SUFFIXES ${_mkl_libpath_suffix_list})\n  mark_as_advanced(${_varname}_ST)\nendfunction()\n\n# Find MKL headers\n#\nfind_path(COSMA_MKL_INCLUDE_DIRS mkl.h HINTS ${MKL_ROOT}/include\n                                            ${MKL_ROOT}/mkl/include)\nmark_as_advanced(COSMA_MKL_INCLUDE_DIRS)\n\n# Group flags for static libraries on Linux (GNU, PGI, ICC -> same linker)\n#\nif(UNIX AND NOT APPLE)\n  set(_mkl_linker_pre_flags_ST \"-Wl,--start-group\")\n  set(_mkl_linker_post_flags_ST \"-Wl,--end-group\")\nendif()\n\n# Core MKL\n#\n__mkl_find_library(MKL_CORE_LIB mkl_core)\n\n# Interface\n#\n__mkl_find_library(MKL_INTERFACE_INTEL_32BIT_LIB mkl_intel_lp64)\n__mkl_find_library(MKL_INTERFACE_INTEL_64BIT_LIB mkl_intel_ilp64)\nif(NOT APPLE\n   AND CMAKE_Fortran_COMPILER_LOADED\n   AND CMAKE_Fortran_COMPILER_ID STREQUAL \"GNU\")\n  __mkl_find_library(MKL_INTERFACE_GF_32BIT_LIB mkl_gf_lp64)\n  __mkl_find_library(MKL_INTERFACE_GF_64BIT_LIB mkl_gf_ilp64)\nendif()\n\n# Threading\n#\n__mkl_find_library(MKL_SEQ_LIB mkl_sequential)\nif(NOT APPLE\n   AND (CMAKE_C_COMPILER_ID STREQUAL \"GNU\"\n        OR CMAKE_CXX_COMPILER_ID STREQUAL \"GNU\"\n        OR CMAKE_Fortran_COMPILER_ID STREQUAL 
\"GNU\"))\n  __mkl_find_library(MKL_OMP_LIB mkl_gnu_thread)\nelse()\n  __mkl_find_library(MKL_OMP_LIB mkl_intel_thread)\nendif()\n__mkl_find_library(MKL_TBB_LIB mkl_tbb_thread)\n\n# BLACS\n#\nif(APPLE)\n  __mkl_find_library(MKL_BLACS_MPICH_32BIT_LIB mkl_blacs_mpich_lp64)\n  __mkl_find_library(MKL_BLACS_MPICH_64BIT_LIB mkl_blacs_mpich_ilp64)\nelse()\n  __mkl_find_library(MKL_BLACS_MPICH_32BIT_LIB mkl_blacs_intelmpi_lp64)\n  __mkl_find_library(MKL_BLACS_MPICH_64BIT_LIB mkl_blacs_intelmpi_ilp64)\nendif()\n__mkl_find_library(MKL_BLACS_OMPI_32BIT_LIB mkl_blacs_openmpi_lp64)\n__mkl_find_library(MKL_BLACS_OMPI_64BIT_LIB mkl_blacs_openmpi_ilp64)\n\n# ScaLAPACK\n#\n__mkl_find_library(MKL_SCALAPACK_32BIT_LIB mkl_scalapack_lp64)\n__mkl_find_library(MKL_SCALAPACK_64BIT_LIB mkl_scalapack_ilp64)\n\n# Check if core libs were found\n#\nfind_package_handle_standard_args(MKL REQUIRED_VARS COSMA_MKL_INCLUDE_DIRS\n                                                    Threads_FOUND)\n\n# Sequential has no threading dependency. There is currently no TBB module\n# shipped with CMake. The dependency is not accounted for. 
(FIXME)\n#\nset(_mkl_dep_found_SEQ TRUE)\nset(_mkl_dep_found_TBB TRUE)\nif(TARGET OpenMP::OpenMP_CXX)\n  set(_mkl_dep_OMP ${OpenMP_CXX_LIBRARIES})\n  set(_mkl_dep_found_OMP TRUE)\nendif()\n\n# Define all blas, blacs and scalapack\n#\nforeach(_libtype \"ST\" \"DYN\")\n  set(_mkl_core_lib ${MKL_CORE_LIB_${_libtype}})\n  foreach(_bits \"32BIT\" \"64BIT\")\n    set(_mkl_scalapack_lib ${MKL_SCALAPACK_${_bits}_LIB_${_libtype}})\n    foreach(_iface \"INTEL\" \"GF\")\n      set(_mkl_interface_lib\n          ${MKL_INTERFACE_${_iface}_${_bits}_LIB_${_libtype}})\n      foreach(_threading \"SEQ\" \"OMP\" \"TBB\")\n        set(_mkl_threading_lib ${MKL_${_threading}_LIB_${_libtype}})\n\n        string(TOLOWER \"${_iface}_${_bits}_${_threading}_${_libtype}\"\n                       _tgt_config)\n        set(_mkl_tgt cosma::BLAS::MKL::${_tgt_config})\n\n        if(MKL_FOUND\n           AND _mkl_interface_lib\n           AND _mkl_threading_lib\n           AND _mkl_core_lib\n           AND _mkl_dep_found_${_threading}\n           AND NOT TARGET ${_mkl_tgt})\n          set(_mkl_libs\n              \"${_mkl_linker_pre_flags_${_threading}}\"\n              \"${_mkl_interface_lib}\"\n              \"${_mkl_threading_lib}\"\n              \"${_mkl_core_lib}\"\n              \"${_mkl_linker_post_flags_${_threading}}\"\n              \"${_mkl_dep_${_threading}}\"\n              \"Threads::Threads\")\n          add_library(${_mkl_tgt} INTERFACE IMPORTED)\n          set_target_properties(\n            ${_mkl_tgt}\n            PROPERTIES INTERFACE_INCLUDE_DIRECTORIES \"${COSMA_MKL_INCLUDE_DIRS}\"\n                       INTERFACE_LINK_LIBRARIES \"${_mkl_libs}\")\n        endif()\n\n        foreach(_mpi_impl \"MPICH\" \"OMPI\")\n          set(_mkl_blacs_lib ${MKL_BLACS_${_mpi_impl}_${_bits}_LIB_${_libtype}})\n\n          string(\n            TOLOWER \"${_mpi_impl}_${_iface}_${_bits}_${_threading}_${_libtype}\"\n                    _tgt_config)\n\n          set(_scalapack_tgt 
cosma::BLAS::MKL::scalapack_${_tgt_config})\n\n          if(_mkl_blacs_lib\n             AND TARGET ${_mkl_tgt}\n             AND TARGET MPI::MPI_CXX\n             AND NOT TARGET cosma::BLAS::MKL::blacs_${_tgt_config})\n            set(_blacs_libs\n                \"${_mkl_linker_pre_flags_${_libtype}}\"\n                \"${_mkl_interface_lib}\"\n                \"${_mkl_threading_lib}\"\n                \"${_mkl_core_lib}\"\n                \"${_mkl_blacs_lib}\"\n                \"${_mkl_linker_post_flags_${_libtype}}\"\n                \"MPI::MPI_CXX\"\n                \"${_mkl_dep_${_threading}}\"\n                \"Threads::Threads\")\n            add_library(cosma::BLAS::MKL::blacs_${_tgt_config} INTERFACE IMPORTED)\n            set_target_properties(\n              cosma::BLAS::MKL::blacs_${_tgt_config}\n              PROPERTIES INTERFACE_INCLUDE_DIRECTORIES\n                         \"${COSMA_MKL_INCLUDE_DIRS}\" INTERFACE_LINK_LIBRARIES\n                                                    \"${_mkl_blacs_lib}\")\n          endif()\n\n          if(_mkl_scalapack_lib AND NOT TARGET\n                                    cosma::BLAS::MKL::scalapack_${_tgt_config})\n            set(_scalapack_libs \"${_mkl_scalapack_lib}\" \"${_blacs_tgt}\")\n            add_library(cosma::BLAS::MKL::scalapack_${_tgt_config} INTERFACE IMPORTED)\n            set_target_properties(\n              cosma::BLAS::MKL::scalapack_${_tgt_config}\n              PROPERTIES INTERFACE_LINK_LIBRARIES \"${_scalapack_libs}\")\n          endif()\n        endforeach()\n      endforeach()\n    endforeach()\n  endforeach()\nendforeach()\n\nif(MKL_FOUND)\n  # BLAS in the Intel MKL 10+ library?\n\n  # the findMKL package finds all possible combination and define target for\n  # each of them we just need to find which compiler we use, mpi etc...\n\n  if(CMAKE_Fortran_COMPILER_LOADED\n     AND CMAKE_Fortran_COMPILER_ID STREQUAL \"GNU\"\n     AND NOT APPLE)\n    set(COSMA_BLAS_mkl_INTFACE \"gf\")\n  
else()\n\t  set(COSMA_BLAS_mkl_INTFACE \"intel\")\n  endif()\n\n  if(COSMA_BLAS_THREADING MATCHES \"thread|gnu-thread|openmp\")\n\t  set(COSMA_BLAS_mkl_thread__ \"omp\")\n  endif()\n\n  if(COSMA_BLAS_THREADING MATCHES \"sequential\")\n\t  set(COSMA_BLAS_mkl_thread__ \"seq\")\n  endif()\n\n  if(COSMA_BLAS_THREADING MATCHES \"intel-thread\")\n\t  set(COSMA_BLAS_mkl_thread__ \"intel\")\n  endif()\n\n  if(COSMA_BLAS_THREADING MATCHES \"tbb\")\n\t  set(COSMA_BLAS_mkl_thread__ \"tbb\")\n  endif()\n\n  if(COSMA_BLAS_INTERFACE MATCHES \"64bits\")\n\t  set(COSMA_BLAS_mkl_ILP_MODE \"64bit\")\n  else()\n\t  set(COSMA_BLAS_mkl_ILP_MODE \"32bit\")\n  endif()\n\n  get_target_property(\n    MKL_BLAS_INCLUDE_DIRS\n    cosma::BLAS::MKL::${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_INCLUDE_DIRECTORIES)\n  get_target_property(\n    MKL_BLAS_LIBRARIES\n    cosma::BLAS::MKL::${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_LINK_LIBRARIES)\n  if(NOT TARGET cosma::BLAS::MKL::blas)\n    add_library(cosma::BLAS::MKL::MKL INTERFACE IMPORTED)\n    add_library(cosma::BLAS::MKL::blas ALIAS cosma::BLAS::MKL::MKL)\n    # create a empty lapack\n    add_library(cosma::BLAS::MKL::lapack INTERFACE IMPORTED)\n  endif()\n  set_target_properties(\n    cosma::BLAS::MKL::MKL\n    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES \"${COSMA_MKL_INCLUDE_DIRS}\"\n               INTERFACE_LINK_LIBRARIES \"${MKL_BLAS_LIBRARIES}\")\n\n  if(\"${MPI_CXX_LIBRARY_VERSION_STRING}\" MATCHES \"Open MPI\")\n    set(__mkl_mpi_ver_ \"ompi\")\n  else()\n    set(__mkl_mpi_ver_ \"mpich\")\n  endif()\n\n  get_target_property(\n    __mkl_scalapack_inc\n    cosma::BLAS::MKL::scalapack_${__mkl_mpi_ver_}_${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_INCLUDE_DIRECTORIES)\n  get_target_property(\n    __mkl_scalapack_lib\n    
cosma::BLAS::MKL::scalapack_${__mkl_mpi_ver_}_${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_LINK_LIBRARIES)\n  get_target_property(\n    __mkl_blacs_inc\n    cosma::BLAS::MKL::blacs_${__mkl_mpi_ver_}_${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_INCLUDE_DIRECTORIES)\n  get_target_property(\n    __mkl_blacs_lib\n    cosma::BLAS::MKL::blacs_${__mkl_mpi_ver_}_${COSMA_BLAS_mkl_INTFACE}_${COSMA_BLAS_mkl_ILP_MODE}_${COSMA_BLAS_mkl_thread__}_dyn\n    INTERFACE_LINK_LIBRARIES)\n  if(NOT TARGET cosma::BLAS::MKL::scalapack_link)\n    add_library(cosma::BLAS::MKL::scalapack_link INTERFACE IMPORTED)\n    set_target_properties(\n      cosma::BLAS::MKL::scalapack_link\n      PROPERTIES INTERFACE_INCLUDE_DIRECTORIES \"${__mkl_scalapack_inc}\"\n                 INTERFACE_LINK_LIBRARIES\n                 \"${__mkl_scalapack_lib};${__mkl_blacs_lib}\")\n  endif()\n  unset(COSMA_BLAS_mkl_ILP_MODE)\n  unset(COSMA_BLAS_mkl_INTFACE)\n  unset(COSMA_BLAS_mkl_thread__)\n  unset(BLAS_mkl_OMP)\n  unset(BLAS_mkl_OS_NAME)\n  unset(__mkl_blacs_lib)\n  unset(__mkl_blacs_inc)\n  unset(__mkl_scalapack_lib)\n  unset(__mkl_scalapack_inc)\n  set(COSMA_BLAS_VENDOR \"MKL\")\n  set(COSMA_MKL_SCALAPACK_VENDOR TRUE)\n  mark_as_advanced(COSMA_BLAS_VENDOR)\n  mark_as_advanced(COSMA_MKL_FOUND)\n  mark_as_advanced(COSMA_MKL_SCALAPACK_VENDOR)\nendif()\n"
  },
  {
    "path": "cmake/FindNCCL.cmake",
    "content": "include(FindPackageHandleStandardArgs)\n\nfind_path(COSMA_NCCL_INCLUDE_DIRS\n  NAMES nccl.h\n  HINTS\n  ${NCCL_ROOT}\n  ENV NCCLROOT\n)\n\nfind_library(COSMA_NCCL_LIBRARIES\n  NAMES nccl nccl_static\n  HINTS\n  ${NCCL_ROOT}\n  ENV NCCLROOT\n)\n\nfind_package_handle_standard_args(NCCL DEFAULT_MSG COSMA_NCCL_INCLUDE_DIRS COSMA_NCCL_LIBRARIES)\n\nif (NCCL_FOUND AND NOT TARGET cosma::nccl)\n  add_library(cosma::nccl INTERFACE IMPORTED)\n  set_target_properties(cosma::nccl\n    PROPERTIES\n    INTERFACE_INCLUDE_DIRECTORIES ${COSMA_NCCL_INCLUDE_DIRS}\n    INTERFACE_LINK_LIBRARIES ${COSMA_NCCL_LIBRARIES})\nendif()\n"
  },
  {
    "path": "cmake/FindNVPL.cmake",
    "content": "find_package(\"nvpl_blas\" REQUIRED)\nfind_package(\"nvpl_lapack\" REQUIRED)\nfind_package(\"nvpl_scalapack\" REQUIRED)\n\nif(COSMA_BLAS_INTERFACE STREQUAL \"32bits\")\n  set(_nvpl_int \"_lp64\")\nelse()\n  set(_nvpl_int \"_ilp64\")\nendif()\n\nif(COSMA_BLAS_THREADING STREQUAL \"openmp\")\n  set(_nvpl_thread \"_omp\")\nelse()\n  set(_nvpl_thread \"_seq\")\nendif()\n\nif(\"${MPI_CXX_LIBRARY_VERSION_STRING}\" MATCHES \"Open MPI\")\n  if(MPI_VERSION VERSION_GREATER_EQUAL \"5.0\")\n    set(_nvpl_mpi \"_openmpi5\")\n  elseif(MPI_VERSION VERSION_GREATER_EQUAL \"4.0\")\n    set(_nvpl_mpi \"_openmpi4\")\n  else(MPI_VERSION VERSION_GREATER_EQUAL \"3.0\")\n    set(_nvpl_mpi \"_openmpi3\")\n  endif()\nelse()\n  set(_nvpl_mpi \"_mpich\")\nendif()\n\nif(NOT TARGET \"cosma::BLAS::NVPL::nvpl\")\n  add_library(\"cosma::BLAS::NVPL::nvpl\" INTERFACE IMPORTED)\n  target_link_libraries(\"cosma::BLAS::NVPL::nvpl\" INTERFACE\n    \"nvpl::blas${_nvpl_int}${_nvpl_thread}\" \"nvpl::lapack${_nvpl_int}${_nvpl_thread}\"\n    \"nvpl::blacs${_nvpl_int}${_nvpl_mpi}\" \"nvpl::scalapack${_nvpl_int}\")\n\n  get_target_property(COSMA_NVPL_LAPACK_LIBRARIES \"nvpl::lapack${_nvpl_int}${_nvpl_thread}\" INTERFACE_LINK_LIBRARIES)\n  get_target_property(COSMA_NVPL_SCALAPACK_LIBRARIES \"nvpl::scalapack${_nvpl_int}\" INTERFACE_LINK_LIBRARIES)\n  get_target_property(COSMA_NVPL_BLAS_INCLUDE_DIRS \"nvpl::blas${_nvpl_int}${_nvpl_thread}\" INTERFACE_INCLUDE_DIRECTORIES)\n  get_target_property(COSMA_NVPL_LAPACK_INCLUDE_DIRS \"nvpl::lapack${_nvpl_int}${_nvpl_thread}\" INTERFACE_INCLUDE_DIRECTORIES)\n  get_target_property(COSMA_NVPL_SCALAPACK_INCLUDE_DIRS \"nvpl::scalapack${_nvpl_int}\" INTERFACE_INCLUDE_DIRECTORIES)\n\n  set_target_properties(\n    cosma::BLAS::NVPL::nvpl \n    PROPERTIES INTERFACE_LINK_LIBRARIES \n    \"${COSMA_NVPL_LAPACK_LIBRARIES}\")\n  set_target_properties(\n    cosma::BLAS::NVPL::nvpl\n    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES \n    
\"${COSMA_NVPL_BLAS_INCLUDE_DIRS};${COSMA_NVPL_LAPACK_INCLUDE_DIRS}\")\n\n  add_library(cosma::BLAS::NVPL::blas ALIAS cosma::BLAS::NVPL::nvpl)\n\n  add_library(cosma::BLAS::NVPL::scalapack_link INTERFACE IMPORTED)\n  set_target_properties(\n    cosma::BLAS::NVPL::scalapack_link \n    PROPERTIES INTERFACE_LINK_LIBRARIES \n    \"${COSMA_NVPL_LAPACK_LIBRARIES};${COSMA_NVPL_SCALAPACK_LIBRARIES}\")\n  set_target_properties(\n    cosma::BLAS::NVPL::scalapack_link \n    PROPERTIES INTERFACE_INCLUDE_DIRECTORIES \n    \"${COSMA_NVPL_BLAS_INCLUDE_DIRS};${COSMA_NVPL_LAPACK_INCLUDE_DIRS};${COSMA_NVPL_SCALAPACK_INCLUDE_DIRS}\")\nendif()\n"
  },
  {
    "path": "cmake/FindOPENBLAS.cmake",
    "content": "# find OPENBLAS\n\ninclude(FindPackageHandleStandardArgs)\n\n# if(NOT POLICY CMP0074)\nset(_OPENBLAS_PATHS ${OPENBLAS_ROOT}\n    $ENV{OPENBLAS_ROOT}\n    $ENV{OPENBLASROOT}\n    $ENV{OPENBLAS_DIR}\n    $ENV{OPENBLASDIR})\n# endif()\n\nfind_path(COSMA_OPENBLAS_INCLUDE_DIRS\n    NAMES \"cblas-openblas.h\" \"cblas_openblas.h\" \"cblas.h\"\n    PATH_SUFFIXES \"openblas\" \"openblas/include\" \"include\" \"include/openblas\"\n    HINTS ${_OPENBLAS_PATHS}\n    DOC \"openblas include directory\")\n\n  find_library(COSMA_OPENBLAS_LINK_LIBRARIES\n    NAMES openblas\n    PATH_SUFFIXES \"lib\" \"lib64\" \"openblas/lib\" \"openblas/lib64\" \"openblas\"\n    HINTS ${_OPENBLAS_PATHS}\n    DOC \"openblas libraries list\")\n\n  find_package_handle_standard_args(OPENBLAS\n    DEFAULT_MSG\n    COSMA_OPENBLAS_LINK_LIBRARIES COSMA_OPENBLAS_INCLUDE_DIRS)\n\n  if(NOT TARGET cosma::BLAS::OPENBLAS::openblas)\n    add_library(cosma::BLAS::OPENBLAS::openblas INTERFACE IMPORTED)\n    add_library(cosma::BLAS::OPENBLAS::blas ALIAS cosma::BLAS::OPENBLAS::openblas)\n  endif()\n  set_property(TARGET cosma::BLAS::OPENBLAS::openblas\n    PROPERTY INTERFACE_LINK_LIBRARIES ${COSMA_OPENBLAS_LINK_LIBRARIES})\n  set_property(TARGET cosma::BLAS::OPENBLAS::openblas\n    PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${COSMA_OPENBLAS_INCLUDE_DIRS})\n\n# prevent clutter in cache\nMARK_AS_ADVANCED(OPENBLAS_FOUND OPENBLAS_LIBRARIES OPENBLAS_INCLUDE_DIRS)\n"
  },
  {
    "path": "cmake/FindSCALAPACK.cmake",
    "content": "include(FindPackageHandleStandardArgs)\n\nif(COSMA_SCALAPACK STREQUAL \"MKL\")\n  find_package(MKL REQUIRED)\n  get_target_property(COSMA_SCALAPACK_LINK_LIBRARIES cosma::BLAS::MKL::scalapack_link\n    INTERFACE_LINK_LIBRARIES)\nelseif(COSMA_SCALAPACK STREQUAL \"CRAY_LIBSCI\")\n  find_package(CRAY_LIBSCI REQUIRED)\n  get_target_property(COSMA_SCALAPACK_LINK_LIBRARIES cosma::BLAS::CRAY_LIBSCI::scalapack_link\n    INTERFACE_LINK_LIBRARIES)\nelseif(COSMA_SCALAPACK STREQUAL \"NVPL\")\n  find_package(NVPL REQUIRED)\n  get_target_property(COSMA_SCALAPACK_LINK_LIBRARIES cosma::BLAS::NVPL::scalapack_link\n    INTERFACE_LINK_LIBRARIES)\n  message(WARNING \"COSMA_SCALAPACK_LINK_LIBRARIES: ${COSMA_SCALAPACK_LINK_LIBRARIES}\")\nelseif(COSMA_SCALAPACK STREQUAL \"CUSTOM\")\n  find_library(COSMA_SCALAPACK_LINK_LIBRARIES\n    NAMES scalapack\n    HINTS\n    ${_COSMA_SCALAPACK_LIBRARY_DIRS}\n    ENV SCALAPACKROOT\n    ENV SCALAPACK_ROOT\n    ENV ORNL_SCALAPACK_ROOT\n    ENV SCALAPACK_PREFIX\n    ENV SCALAPACK_DIR\n    ENV SCALAPACKDIR\n    /usr/bin\n    PATH_SUFFIXES lib\n    DOC \"Path to the scalapack library.\")\nendif()\n\nfind_package_handle_standard_args(SCALAPACK REQUIRED_VARS COSMA_SCALAPACK_LINK_LIBRARIES)\n\nset(COSMA_SCALAPACK_FOUND \"YES\")\n\nif (NOT TARGET cosma::scalapack::scalapack)\n  add_library(cosma::scalapack::scalapack INTERFACE IMPORTED)\n  set_target_properties(\n    cosma::scalapack::scalapack PROPERTIES INTERFACE_LINK_LIBRARIES\n    \"${COSMA_SCALAPACK_LINK_LIBRARIES}\")\nendif()\n\nmark_as_advanced(COSMA_SCALAPACK_LINK_LIBRARIES COSMA_SCALAPACK_FOUND)\n"
  },
  {
    "path": "cmake/GitSubmodule.cmake",
    "content": "# Call to ensure that the git submodule in location `path` is loaded.\n# If the submodule is not loaded, an error message that describes\n# how to update the submodules is printed.\n# Sets the variable name_avail to `ON` if the submodule is available,\n# or `OFF` otherwise.\n# copyright github.com/arbor-sim\n\nfunction(check_git_submodule name path)\n    set(success_var \"${name}_avail\")\n    set(${success_var} ON PARENT_SCOPE)\n\n    get_filename_component(dotgit \"${path}/.git\" ABSOLUTE)\n    if(NOT EXISTS ${dotgit})\n        message(\n            \"\\nThe git submodule for ${name} is not available.\\n\"\n            \"To check out all submodules use the following commands:\\n\"\n            \"    git submodule init\\n\"\n            \"    git submodule update\\n\"\n            \"Or download submodules recursively when checking out:\\n\"\n            \"    git clone --recursive https://github.com/eth-cscs/COSMA.git\\n\"\n        )\n\n        # if the repository was not available, and git failed, set AVAIL to false\n        set(${success_var} OFF PARENT_SCOPE)\n    endif()\nendfunction()\n\nfunction(add_git_submodule_or_find_external name path)\n  check_git_submodule(${name} ${path})\n  if(NOT ${name}_avail)\n    # attempt to find system installation of pybind11\n    find_package(${name} REQUIRED)\n  else()\n    message(VERBOSE \"Using ${name} as git submodule from ${path}\")\n    add_subdirectory(\"${path}\")\n  endif()\nendfunction()\n"
  },
  {
    "path": "cmake/adjust_mpiexec_flags.cmake",
    "content": "# Appends the --oversubscribe flag if OpenMPI.\n#\nfunction(adjust_mpiexec_flags)\n    execute_process(COMMAND mpirun --version OUTPUT_VARIABLE MPIRUN_OUTPUT)\n    string(FIND \"${MPIRUN_OUTPUT}\" \"Open MPI\" OMPI_POS)\n    if(NOT OMPI_POS STREQUAL \"-1\")\n        set(MPIEXEC_PREFLAGS \"--oversubscribe;${MPIEXEC_PREFLAGS}\" CACHE STRING \"These flags will be directly before the executable that is being run by mpiexec.\" FORCE)\n        set(MPI_TYPE \"ompi\" PARENT_SCOPE)\n    else()\n        set(MPI_TYPE \"mpich\" PARENT_SCOPE)\n    endif()\nendfunction()\n"
  },
  {
    "path": "cmake/build_type.cmake",
    "content": "# Set default to Release if none was specified and update the docs.\n#\nset(default_build_type ${CMAKE_BUILD_TYPE})\nif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)\n    message(STATUS \"Setting build type to 'Release' as none was specified.\")\n    set(default_build_type \"Release\")\nendif()\nset(CMAKE_BUILD_TYPE \"${default_build_type}\" CACHE STRING \"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Profile.\" FORCE)\n\n# Define a custom build type\n#\n#set( CMAKE_CXX_FLAGS_PROFILE \"${CMAKE_CXX_FLAGS_RELEASE}\" CACHE STRING \"\" FORCE)\n#set( CMAKE_C_FLAGS_PROFILE \"${CMAKE_C_FLAGS_RELEASE}\" CACHE STRING \"\" FORCE )\n#set( CMAKE_EXE_LINKER_FLAGS_PROFILE \"${CMAKE_EXE_LINKER_FLAGS_RELEASE}\" CACHE STRING \"\" FORCE )\n#set( CMAKE_SHARED_LINKER_FLAGS_PROFILE \"${CMAKE_SHARED_LINKER_FLAGS_RELEASE}\" CACHE STRING \"\" FORCE )\n#mark_as_advanced(CMAKE_CXX_FLAGS_PROFILE\n#                 CMAKE_C_FLAGS_PROFILE\n#                 CMAKE_EXE_LINKER_FLAGS_PROFILE\n#                 CMAKE_SHARED_LINKER_FLAGS_PROFILE )\n#\n# use with $<$<CONFIG:Profile>:semiprof>\n"
  },
  {
    "path": "cmake/cosma.pc.in",
    "content": "prefix=@CMAKE_INSTALL_PREFIX@\nexec_prefix=@CMAKE_INSTALL_PREFIX@\nlibdir=@CMAKE_INSTALL_FULL_LIBDIR@\nincludedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@\n\nName: cosma\nDescription: Distributed communication-optimal matrix-matrix multiplication algorithm \nVersion: @cosma_VERSION@\nLibs: -L${libdir} -lcosma\nCflags: -I${includedir}\n"
  },
  {
    "path": "cmake/cosmaConfig.cmake.in",
    "content": "if(NOT TARGET cosma::cosma)\n    cmake_policy(PUSH) # Save project's policies\n    if(POLICY CMP0074)\n        cmake_policy(SET CMP0074 NEW)\n    endif()\n    include(CMakeFindDependencyMacro)\n\n    # Bundled modules should be found first to prevent conflicts with similarly\n    # named modules in calling projects.\n    #\n    set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR} ${CMAKE_MODULE_PATH})\n\n    set(MKL_ROOT \"@MKL_ROOT@\")\n    set(MPI_DETERMINE_LIBRARY_VERSION TRUE)\n    find_package(MPI COMPONENTS \"CXX\" REQUIRED)\n\n    set(COSMA_BLAS \"@COSMA_BLAS@\")\n    set(COSMA_SCALAPACK \"@COSMA_SCALAPACK@\")\n    set(COSMA_BLAS_VENDOR \"@COSMA_BLAS_VENDOR@\")\n    set(COSMA_BLAS_THREADING \"@COSMA_BLAS_THREADING@\")\n\n    if (\"@COSMA_GPU_BACKEND@\" STREQUAL \"CUDA\" OR \"@COSMA_GPU_BACKEND@\" STREQUAL \"ROCM\")\n      if (${COSMA_BLAS} STREQUAL \"CUDA\")\n        find_dependency(CUDAToolkit)\n      else()\n        find_dependency(hip)\n      endif()\n\n      set(TILEMM_GPU_BACKEND \"@COSMA_GPU_BACKEND@\" CACHE STRING FORCE \"\")\n      find_dependency(Tiled-MM) # bundled\n\n      if (\"@COSMA_WITH_NCCL@\")\n        find_dependency(NCCL)\n      endif()\n\n      if(\"@COSMA_WITH_RCCL@\")\n        find_dependency(rccl)\n      endif()\n    endif ()\n\n    if (NOT @COSMA_BLAS_VENDOR@ MATCHES \"OFF\")\n      find_dependency(Blas)\n    endif()\n\n    if (NOT ${COSMA_SCALAPACK} MATCHES \"OFF\")\n        find_dependency(SCALAPACK)\n    endif ()\n\n    if (\"@COSMA_WITH_PROFILING@\")\n        find_dependency(semiprof)\n    endif ()\n\n    # Clean-up module path.\n    #\n    list(REMOVE_ITEM CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR})\n\n    # These are bundled with cosma\n    #\n    find_dependency(costa)\n\n    include(\"${CMAKE_CURRENT_LIST_DIR}/cosmaTargets.cmake\")\n    cmake_policy(POP) # Restore project's policies\nendif()\n"
  },
  {
    "path": "cmake/find_cuda_version.cmake",
    "content": "# finds CUDA_TOOLKIT_MAJOR_VERSION AND CUDA_TOOLKIT_MINOR_VERSION\nfunction(find_cuda_version)\n    execute_process(COMMAND nvcc --version  OUTPUT_VARIABLE CUDA_VERSION_STRING)\n    string(REGEX MATCH \"release ([0-9]*)\\\\.([0-9]*)\" _ \"${CUDA_VERSION_STRING}\")\n    set(CUDA_TOOLKIT_MAJOR_VERSION ${CMAKE_MATCH_1})\n    set(CUDA_TOOLKIT_MINOR_VERSION ${CMAKE_MATCH_2})\n    message(STATUS \"CUDA_TOOLKIT_MAJOR_VERSION = ${CUDA_TOOLKIT_MAJOR_VERSION}\")\n    message(STATUS \"CUDA_TOOLKIT_MINOR_VERSION = ${CUDA_TOOLKIT_MINOR_VERSION}\")\nendfunction()\n\n"
  },
  {
    "path": "docker/asan/build-env.Dockerfile",
    "content": "FROM ubuntu:20.04\n\nWORKDIR /root\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MPICH_VERSION=4.0.1\n\nENV DEBIAN_FRONTEND noninteractive\nENV FORCE_UNSAFE_CONFIGURE 1\nENV MPICH_VERSION ${MPICH_VERSION}\n\n# Install basic tools\nRUN apt-get update -qq && apt-get install -qq -y --no-install-recommends \\\n    software-properties-common \\\n    build-essential gfortran pkg-config \\\n    git tar wget curl chrpath && \\\n    rm -rf /var/lib/apt/lists/*\n\n# Install cmake\nRUN wget -qO- \"https://cmake.org/files/v3.22/cmake-3.22.1-linux-x86_64.tar.gz\" | tar --strip-components=1 -xz -C /usr/local\n\n# Install MPICH ABI compatible with Cray's lib on Piz Daint\nRUN wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz && \\\n    tar -xzf mpich-${MPICH_VERSION}.tar.gz && \\\n    cd mpich-${MPICH_VERSION} && \\\n    ./configure && \\\n    make install -j$(nproc) && \\\n    rm -rf /root/mpich-${MPICH_VERSION}.tar.gz /root/mpich-${MPICH_VERSION}\n\n# Install OpenBLAS\nARG OPENBLAS_VERSION=0.3.20\nRUN wget -qO - https://github.com/xianyi/OpenBLAS/archive/v${OPENBLAS_VERSION}.tar.gz -O openblas.tar.gz && \\\n    tar -xzf openblas.tar.gz && \\\n    cd OpenBLAS-${OPENBLAS_VERSION}/ && \\\n    make TARGET=HASWELL NO_STATIC=1 DEBUG=1 -j$(nproc) && \\\n    make install TARGET=HASWELL NO_STATIC=1 PREFIX=/usr/local/ && \\\n    rm -rf /root/openblas.tar.gz /root/OpenBLAS-${OPENBLAS_VERSION}/ && \\\n    ldconfig\n\nARG NETLIB_SCALAPACK_VERSION=2.2.0\n\nRUN wget -qO - http://www.netlib.org/scalapack/scalapack-${NETLIB_SCALAPACK_VERSION}.tgz -O scalapack.tar.gz && \\\n    tar -xzf scalapack.tar.gz && \\\n    cd scalapack-${NETLIB_SCALAPACK_VERSION} && \\\n    mkdir build && \\\n    cd build && \\\n    CC=mpicc FC=mpif90 cmake .. 
\\\n      -DBUILD_STATIC_LIBS=OFF \\\n      -DBUILD_SHARED_LIBS=ON \\\n      -DCMAKE_BUILD_TYPE=Debug && \\\n    make -j$(nproc) && \\\n    make install && \\\n    rm -rf /root/scalapack.tar.gz /root/scalapack-${NETLIB_SCALAPACK_VERSION} && \\\n    ldconfig\n\n# Add deployment tooling\nRUN mkdir -p /opt/libtree && \\\n    curl -Lfso /opt/libtree/libtree https://github.com/haampie/libtree/releases/download/v2.0.0/libtree_x86_64 && \\\n    chmod +x /opt/libtree/libtree\n\n"
  },
  {
    "path": "docker/asan/deploy.Dockerfile",
    "content": "ARG BUILD_ENV\n\nFROM $BUILD_ENV as builder\n\nARG BLAS\n\n# Build COSMA\nCOPY . /COSMA\n\n# reduce the minimum local dimension to allow all mpi ranks to take part \n# in testing\nENV COSMA_MIN_LOCAL_DIMENSION=32\n\nRUN mkdir /COSMA/build && cd /COSMA/build && \\\n    CC=mpicc CXX=mpicxx cmake .. \\\n      -DCOSMA_WITH_TESTS=ON \\\n      -DCOSMA_BLAS=OPENBLAS \\\n      -DCOSMA_SCALAPACK=CUSTOM \\\n      -DCMAKE_BUILD_TYPE=Debug \\\n      -DCMAKE_CXX_FLAGS_DEBUG=\"-g -Og -fno-omit-frame-pointer -fsanitize=address,undefined\" \\\n      -DCMAKE_INSTALL_PREFIX=/root/COSMA-build && \\\n      make -j$(nproc) && \\\n      make install && \\\n      rm -rf /COSMA\n\nRUN /opt/libtree/libtree \\\n      --chrpath \\\n      -d /root/COSMA.bundle/ \\\n      /root/COSMA-build/bin/test.cosma \\\n      /root/COSMA-build/bin/test.mapper \\\n      /root/COSMA-build/bin/test.multiply \\\n      /root/COSMA-build/bin/test.multiply_using_layout \\\n      /root/COSMA-build/bin/test.pdgemm \\\n      /root/COSMA-build/bin/test.scalar_matmul\n\nFROM ubuntu:20.04\n\n# Automatically print stacktraces on segfault\nENV LD_PRELOAD=/lib/x86_64-linux-gnu/libSegFault.so\n\nCOPY --from=builder /root/COSMA.bundle /root/COSMA.bundle\n\n# Make it easy to call our binaries.\nENV PATH=\"/root/COSMA.bundle/usr/bin:$PATH\"\n\nRUN echo \"/root/COSMA.bundle/usr/lib/\" > /etc/ld.so.conf.d/cosma.conf && ldconfig\n\nWORKDIR /root/COSMA.bundle/usr/bin\n\n# I'm not getting ASAN_OPTIONS=suppressions=file to work, so just disable leak detection for now.\nENV ASAN_OPTIONS=detect_leaks=false\n\n\n"
  },
  {
    "path": "docker/cpu-release/build-env.Dockerfile",
    "content": "FROM ubuntu:20.04\n\nWORKDIR /root\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MKL_VERSION=2020.4-912\nARG MPICH_VERSION=4.0.1\n\nENV DEBIAN_FRONTEND noninteractive\nENV MKLROOT=/opt/intel/compilers_and_libraries/linux/mkl\nENV FORCE_UNSAFE_CONFIGURE 1\nENV MPICH_VERSION ${MPICH_VERSION}\nENV MKL_VERSION ${MKL_VERSION}\n\n# reduce the minimum local dimension to allow all mpi ranks to take part \n# in testing\nENV COSMA_MIN_LOCAL_DIMENSION=32\n\n# Install basic tools\nRUN apt-get update -qq && apt-get install -qq -y --no-install-recommends \\\n    software-properties-common \\\n    build-essential \\\n    git tar wget curl gpg-agent chrpath && \\\n    rm -rf /var/lib/apt/lists/*\n\n# Install cmake\nRUN wget -qO- \"https://cmake.org/files/v3.22/cmake-3.22.1-linux-x86_64.tar.gz\" | tar --strip-components=1 -xz -C /usr/local\n\n# Install MPICH ABI compatible with Cray's lib on Piz Daint\nRUN wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz && \\\n    tar -xzf mpich-${MPICH_VERSION}.tar.gz && \\\n    cd mpich-${MPICH_VERSION} && \\\n    ./configure --disable-fortran && \\\n    make install -j$(nproc) && \\\n    rm -rf /root/mpich-${MPICH_VERSION}.tar.gz /root/mpich-${MPICH_VERSION}\n\n# Install MKL\nRUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB 2>/dev/null | apt-key add - && \\\n    apt-add-repository 'deb https://apt.repos.intel.com/mkl all main' && \\\n    apt-get install -y -qq --no-install-recommends intel-mkl-64bit-${MKL_VERSION} && \\\n    rm -rf /var/lib/apt/lists/* && \\\n    echo -e \"/opt/intel/lib/intel64\\n/opt/intel/compilers_and_libraries/linux/mkl/lib/intel64\" >> /etc/ld.so.conf.d/intel.conf && \\\n    ldconfig\n\n# Add deployment tooling\nRUN mkdir -p /opt/libtree && \\\n    curl -Lfso /opt/libtree/libtree https://github.com/haampie/libtree/releases/download/v2.0.0/libtree_x86_64 && \\\n    chmod +x /opt/libtree/libtree\n\n"
  },
  {
    "path": "docker/cpu-release/deploy.Dockerfile",
    "content": "ARG BUILD_ENV\n\nFROM $BUILD_ENV as builder\n\n# Build COSMA\nCOPY . /COSMA\n\nRUN source /opt/intel/bin/compilervars.sh intel64 && \\\n    mkdir /COSMA/build && cd /COSMA/build && \\\n    CC=mpicc CXX=mpicxx cmake .. \\\n      -DCOSMA_WITH_TESTS=ON \\\n      -DCOSMA_BLAS=MKL \\\n      -DCOSMA_SCALAPACK=MKL \\\n      -DCMAKE_BUILD_TYPE=Release \\\n      -DCMAKE_INSTALL_PREFIX=/root/COSMA-build && \\\n      make -j$(nproc) && \\\n      make install && \\\n      rm -rf /COSMA\n\nENV MKL_LIB=/opt/intel/compilers_and_libraries/linux/mkl/lib/intel64\n\n# Run linuxdeploy, and add a bunch of libs that are dlopen'ed by mkl\nRUN /opt/libtree/libtree --chrpath --strip -d /root/COSMA.bundle/ \\\n      /root/COSMA-build/bin/test.cosma \\\n      /root/COSMA-build/bin/test.mapper \\\n      /root/COSMA-build/bin/test.multiply \\\n      /root/COSMA-build/bin/test.multiply_using_layout \\\n      /root/COSMA-build/bin/test.pdgemm \\\n      /root/COSMA-build/bin/test.scalar_matmul \\\n      # MKL dlopen's some of their libs, so we have to explicitly copy them over\n      ${MKL_LIB}/libmkl_avx.so \\\n      ${MKL_LIB}/libmkl_avx2.so \\\n      ${MKL_LIB}/libmkl_avx512_mic.so \\\n      ${MKL_LIB}/libmkl_avx512.so \\\n      ${MKL_LIB}/libmkl_core.so \\\n      ${MKL_LIB}/libmkl_def.so \\\n      ${MKL_LIB}/libmkl_intel_thread.so \\\n      ${MKL_LIB}/libmkl_mc.so \\\n      ${MKL_LIB}/libmkl_mc3.so \\\n      ${MKL_LIB}/libmkl_sequential.so \\\n      ${MKL_LIB}/libmkl_tbb_thread.so \\\n      ${MKL_LIB}/libmkl_vml_avx.so \\\n      ${MKL_LIB}/libmkl_vml_avx2.so \\\n      ${MKL_LIB}/libmkl_vml_avx512_mic.so \\\n      ${MKL_LIB}/libmkl_vml_avx512.so \\\n      ${MKL_LIB}/libmkl_vml_cmpt.so \\\n      ${MKL_LIB}/libmkl_vml_def.so \\\n      ${MKL_LIB}/libmkl_vml_mc.so \\\n      ${MKL_LIB}/libmkl_vml_mc3.so\n\nFROM ubuntu:20.04\n\nCOPY --from=builder /root/COSMA.bundle /root/COSMA.bundle\n\n# Automatically print stacktraces on segfault\nENV 
LD_PRELOAD=/lib/x86_64-linux-gnu/libSegFault.so\n\n# Make it easy to call our binaries.\nENV PATH=\"/root/COSMA.bundle/usr/bin:$PATH\"\n\nRUN echo \"/root/COSMA.bundle/usr/lib/\" > /etc/ld.so.conf.d/cosma.conf && ldconfig\n\nWORKDIR /root/COSMA.bundle/usr/bin\n"
  },
  {
    "path": "docker/gpu/build-env.Dockerfile",
    "content": "FROM nvidia/cuda:11.6.2-devel-ubuntu20.04\n\nWORKDIR /root\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MPICH_VERSION=4.0.1\nARG OPENBLAS_VERSION=0.3.20\nARG NETLIB_SCALAPACK_VERSION=2.2.0\n\nENV DEBIAN_FRONTEND noninteractive\nENV MKLROOT=/opt/intel/compilers_and_libraries/linux/mkl\nENV FORCE_UNSAFE_CONFIGURE 1\nENV MPICH_VERSION ${MPICH_VERSION}\nENV MKL_VERSION ${MKL_VERSION}\n\n# reduce the minimum local dimension to allow all mpi ranks to take part \n# in testing\nENV COSMA_MIN_LOCAL_DIMENSION=32\n\n# Install basic tools\nRUN apt-get update -qq && \\\n    apt-get install -qq -y --no-install-recommends \\\n      software-properties-common \\\n      build-essential gfortran pkg-config \\\n      git tar wget curl chrpath && \\\n    rm -rf /var/lib/apt/lists/*\n\n# Install cmake\nRUN wget -qO- \"https://cmake.org/files/v3.22/cmake-3.22.1-linux-x86_64.tar.gz\" | tar --strip-components=1 -xz -C /usr/local\n\n# Install MPICH ABI compatible with Cray's lib on Piz Daint\nRUN wget -q https://www.mpich.org/static/downloads/${MPICH_VERSION}/mpich-${MPICH_VERSION}.tar.gz && \\\n    tar -xzf mpich-${MPICH_VERSION}.tar.gz && \\\n    cd mpich-${MPICH_VERSION} && \\\n    ./configure && \\\n    make install -j$(nproc) && \\\n    rm -rf /root/mpich-${MPICH_VERSION}.tar.gz /root/mpich-${MPICH_VERSION}\n\n# Install OpenBLAS\nRUN wget -qO - https://github.com/xianyi/OpenBLAS/archive/v${OPENBLAS_VERSION}.tar.gz -O openblas.tar.gz && \\\n    tar -xzf openblas.tar.gz && \\\n    cd OpenBLAS-${OPENBLAS_VERSION}/ && \\\n    make TARGET=HASWELL NO_STATIC=1 -j$(nproc) && \\\n    make install TARGET=HASWELL NO_STATIC=1 PREFIX=/usr/local/ && \\\n    rm -rf /root/openblas.tar.gz /root/OpenBLAS-${OPENBLAS_VERSION}/ && \\\n    ldconfig\n\nRUN wget -qO - http://www.netlib.org/scalapack/scalapack-${NETLIB_SCALAPACK_VERSION}.tgz -O scalapack.tar.gz && \\\n    tar -xzf scalapack.tar.gz && \\\n    cd scalapack-${NETLIB_SCALAPACK_VERSION} && \\\n    mkdir build && \\\n    cd build && \\\n   
 CC=mpicc FC=mpif90 cmake .. \\\n      -DBUILD_STATIC_LIBS=OFF \\\n      -DBUILD_SHARED_LIBS=ON \\\n      -DCMAKE_BUILD_TYPE=Release && \\\n    make -j$(nproc) && \\\n    make install && \\\n    rm -rf /root/scalapack.tar.gz /root/scalapack-${NETLIB_SCALAPACK_VERSION} && \\\n    ldconfig\n\n# Add deployment tooling\nRUN mkdir -p /opt/libtree && \\\n    curl -Lfso /opt/libtree/libtree https://github.com/haampie/libtree/releases/download/v2.0.0/libtree_x86_64 && \\\n    chmod +x /opt/libtree/libtree\n\n"
  },
  {
    "path": "docker/gpu/deploy.Dockerfile",
    "content": "ARG BUILD_ENV\n\nFROM $BUILD_ENV as builder\n\nARG BLAS\n\n# Build COSMA\nCOPY . /COSMA\n\nRUN mkdir /COSMA/build && cd /COSMA/build && \\\n    CC=mpicc CXX=mpicxx cmake .. \\\n      -DCOSMA_WITH_TESTS=ON \\\n      -DCUDA_PATH=/usr/local/cuda \\\n      -DCOSMA_BLAS=CUDA \\\n      -DCOSMA_SCALAPACK=CUSTOM \\\n      -DCMAKE_BUILD_TYPE=Release \\\n      -DCMAKE_INSTALL_PREFIX=/root/COSMA-build && \\\n      make -j$(nproc) && \\\n      make install && \\\n      rm -rf /COSMA\n\n# Run linuxdeploy, and add a bunch of libs that are dlopen'ed by mkl\nRUN /opt/libtree/libtree \\\n      -d /root/COSMA.bundle/ \\\n      --chrpath \\\n      --strip \\\n      /root/COSMA-build/bin/test.cosma \\\n      /root/COSMA-build/bin/test.mapper \\\n      /root/COSMA-build/bin/test.multiply \\\n      /root/COSMA-build/bin/test.multiply_using_layout \\\n      /root/COSMA-build/bin/test.pdgemm \\\n      /root/COSMA-build/bin/test.scalar_matmul\n\nFROM ubuntu:20.04\n\n# This is the only thing necessary really from nvidia/cuda's ubuntu18.04 runtime image\nENV NVIDIA_VISIBLE_DEVICES all\nENV NVIDIA_DRIVER_CAPABILITIES compute,utility\nENV NVIDIA_REQUIRE_CUDA \"cuda>=10.2\"\n\n# Automatically print stacktraces on segfault\nENV LD_PRELOAD=/lib/x86_64-linux-gnu/libSegFault.so\n\nCOPY --from=builder /root/COSMA.bundle /root/COSMA.bundle\n\n# Make it easy to call our binaries.\nENV PATH=\"/root/COSMA.bundle/usr/bin:$PATH\"\n\nRUN echo \"/root/COSMA.bundle/usr/lib/\" > /etc/ld.so.conf.d/cosma.conf && ldconfig\n\nWORKDIR /root/COSMA.bundle/usr/bin\n"
  },
  {
    "path": "libs/gtest_mpi/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.24 FATAL_ERROR)\nproject(gtest_mpi CXX)\n\nadd_subdirectory(external/gtest)\nadd_library(gtest_mpi INTERFACE)\ntarget_include_directories(gtest_mpi INTERFACE ${gtest_mpi_SOURCE_DIR}/include)\ntarget_link_libraries(gtest_mpi INTERFACE gtest)\ntarget_compile_features(gtest_mpi INTERFACE cxx_std_11)\n"
  },
  {
    "path": "libs/gtest_mpi/LICENSE",
    "content": "This project contains source code from the Googletest framework\nobtained from https:github.com/google/googletest with the following\nterms:\n\nCopyright 2005, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n---------------------------------------------------------------------\n\nModifications and additions are published under the following terms:\n\nCopyright 2019, Simon Frasch\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n---------------------------------------------------------------------\n"
  },
  {
    "path": "libs/gtest_mpi/README.md",
    "content": "# GTest MPI\nThis project provides an extension to the Google Test framework, to allow testing of MPI enabled applications. \nThe implementation includes a custom MPI environment and listener, with which all test failure messages are collected on the root process and the output includes the rank index for each failure.\n\n\n## Requirements\n- Google Test version 1.8.1 (other versions may work as well, depending on changes to Listener or Environment interfaces)\n- A MPI library\n- At least C++ 11.\n- Linux or macOS\n\n## Limitations\n- All ranks MUST execute all tests in the same order. Within a test, the executed assertions may differ. If a test should run only on a subset of ranks, the excluded ranks must enter the test, but may exit immediately.\n- Logging features of Google Test are not supported\n\n\n\n## Example\n```\n#include <mpi.h>\n#include \"gtest/gtest.h\"\n#include \"gtest_mpi/gtest_mpi.hpp\"\n\nint main(int argc, char* argv[]) {\n  // Initialize MPI before any call to gtest_mpi\n  MPI_Init(&argc, &argv);\n\n  // Intialize google test\n  ::testing::InitGoogleTest(&argc, argv);\n\n  // Add a test environment, which will initialize a test communicator\n  // (a duplicate of MPI_COMM_WORLD)\n  ::testing::AddGlobalTestEnvironment(new gtest_mpi::MPITestEnvironment());\n\n  auto& test_listeners = ::testing::UnitTest::GetInstance()->listeners();\n\n  // Remove default listener and replace with the custom MPI listener\n  delete test_listeners.Release(test_listeners.default_result_printer());\n  test_listeners.Append(new gtest_mpi::PrettyMPIUnitTestResultPrinter());\n\n  // run tests\n  auto exit_code = RUN_ALL_TESTS();\n\n  // Finalize MPI before exiting\n  MPI_Finalize();\n\n  return exit_code;\n}\n```\n\n# License\n```\nThis project contains source code from the Googletest framework\nobtained from https:github.com/google/googletest with the following\nterms:\n\nCopyright 2005, Google Inc.\nAll rights reserved.\n\nRedistribution and use in 
source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n---------------------------------------------------------------------\n\nModifications and additions are published under the following terms:\n\nCopyright 2019, Simon Frasch\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of 
conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n---------------------------------------------------------------------\n```\n"
  },
  {
    "path": "libs/gtest_mpi/external/gtest/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.24 FATAL_ERROR)\nproject(gtest CXX)\n\nadd_library(gtest STATIC src/gtest-all.cpp)\ntarget_include_directories(gtest PUBLIC ${gtest_SOURCE_DIR}/include)\ntarget_compile_features(gtest PUBLIC cxx_std_11)\n"
  },
  {
    "path": "libs/gtest_mpi/external/gtest/include/gtest/gtest.h",
    "content": "// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file defines the public API for Google Test.  
It should be\n// included by any test program that uses Google Test.\n//\n// IMPORTANT NOTE: Due to limitation of the C++ language, we have to\n// leave some internal implementation details in this header file.\n// They are clearly marked by comments like this:\n//\n//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\n//\n// Such code is NOT meant to be used by a user directly, and is subject\n// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user\n// program!\n//\n// Acknowledgment: Google Test borrowed the idea of automatic test\n// registration from Barthelemy Dagenais' (barthelemy@prologique.com)\n// easyUnit framework.\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_H_\n#define GTEST_INCLUDE_GTEST_GTEST_H_\n\n#include <limits>\n#include <ostream>\n#include <vector>\n\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file declares functions and macros used internally by\n// Google Test.  They are subject to change without notice.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_\n\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: wan@google.com (Zhanyong Wan)\n//\n// Low-level types and utilities for porting Google Test to various\n// platforms.  All macros ending with _ and symbols defined in an\n// internal namespace are subject to change without notice.  Code\n// outside Google Test MUST NOT USE THEM DIRECTLY.  Macros that don't\n// end with _ are part of Google Test's public API and can be used by\n// code outside Google Test.\n//\n// This file is fundamental to Google Test.  All other Google Test source\n// files are expected to #include this.  Therefore, it cannot #include\n// any other Google Test header.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_\n\n// Environment-describing macros\n// -----------------------------\n//\n// Google Test can be used in many different environments.  Macros in\n// this section tell Google Test what kind of environment it is being\n// used in, such that Google Test can provide environment-specific\n// features and implementations.\n//\n// Google Test tries to automatically detect the properties of its\n// environment, so users usually don't need to worry about these\n// macros.  
However, the automatic detection is not perfect.\n// Sometimes it's necessary for a user to define some of the following\n// macros in the build script to override Google Test's decisions.\n//\n// If the user doesn't define a macro in the list, Google Test will\n// provide a default definition.  After this header is #included, all\n// macros in this list will be defined to either 1 or 0.\n//\n// Notes to maintainers:\n//   - Each macro here is a user-tweakable knob; do not grow the list\n//     lightly.\n//   - Use #if to key off these macros.  Don't use #ifdef or \"#if\n//     defined(...)\", which will not work as these macros are ALWAYS\n//     defined.\n//\n//   GTEST_HAS_CLONE          - Define it to 1/0 to indicate that clone(2)\n//                              is/isn't available.\n//   GTEST_HAS_EXCEPTIONS     - Define it to 1/0 to indicate that exceptions\n//                              are enabled.\n//   GTEST_HAS_GLOBAL_STRING  - Define it to 1/0 to indicate that ::string\n//                              is/isn't available (some systems define\n//                              ::string, which is different to std::string).\n//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string\n//                              is/isn't available (some systems define\n//                              ::wstring, which is different to std::wstring).\n//   GTEST_HAS_POSIX_RE       - Define it to 1/0 to indicate that POSIX regular\n//                              expressions are/aren't available.\n//   GTEST_HAS_PTHREAD        - Define it to 1/0 to indicate that <pthread.h>\n//                              is/isn't available.\n//   GTEST_HAS_RTTI           - Define it to 1/0 to indicate that RTTI is/isn't\n//                              enabled.\n//   GTEST_HAS_STD_WSTRING    - Define it to 1/0 to indicate that\n//                              std::wstring does/doesn't work (Google Test can\n//                              be used where std::wstring is 
unavailable).\n//   GTEST_HAS_TR1_TUPLE      - Define it to 1/0 to indicate tr1::tuple\n//                              is/isn't available.\n//   GTEST_HAS_SEH            - Define it to 1/0 to indicate whether the\n//                              compiler supports Microsoft's \"Structured\n//                              Exception Handling\".\n//   GTEST_HAS_STREAM_REDIRECTION\n//                            - Define it to 1/0 to indicate whether the\n//                              platform supports I/O stream redirection using\n//                              dup() and dup2().\n//   GTEST_USE_OWN_TR1_TUPLE  - Define it to 1/0 to indicate whether Google\n//                              Test's own tr1 tuple implementation should be\n//                              used.  Unused when the user sets\n//                              GTEST_HAS_TR1_TUPLE to 0.\n//   GTEST_LANG_CXX11         - Define it to 1/0 to indicate that Google Test\n//                              is building in C++11/C++98 mode.\n//   GTEST_LINKED_AS_SHARED_LIBRARY\n//                            - Define to 1 when compiling tests that use\n//                              Google Test as a shared library (known as\n//                              DLL on Windows).\n//   GTEST_CREATE_SHARED_LIBRARY\n//                            - Define to 1 when compiling Google Test itself\n//                              as a shared library.\n\n// Platform-indicating macros\n// --------------------------\n//\n// Macros indicating the platform on which Google Test is being used\n// (a macro is defined to 1 if compiled on the given platform;\n// otherwise UNDEFINED -- it's never defined to 0.).  Google Test\n// defines these macros automatically.  
Code outside Google Test MUST\n// NOT define them.\n//\n//   GTEST_OS_AIX      - IBM AIX\n//   GTEST_OS_CYGWIN   - Cygwin\n//   GTEST_OS_FREEBSD  - FreeBSD\n//   GTEST_OS_HPUX     - HP-UX\n//   GTEST_OS_LINUX    - Linux\n//     GTEST_OS_LINUX_ANDROID - Google Android\n//   GTEST_OS_MAC      - Mac OS X\n//     GTEST_OS_IOS    - iOS\n//   GTEST_OS_NACL     - Google Native Client (NaCl)\n//   GTEST_OS_OPENBSD  - OpenBSD\n//   GTEST_OS_QNX      - QNX\n//   GTEST_OS_SOLARIS  - Sun Solaris\n//   GTEST_OS_SYMBIAN  - Symbian\n//   GTEST_OS_WINDOWS  - Windows (Desktop, MinGW, or Mobile)\n//     GTEST_OS_WINDOWS_DESKTOP  - Windows Desktop\n//     GTEST_OS_WINDOWS_MINGW    - MinGW\n//     GTEST_OS_WINDOWS_MOBILE   - Windows Mobile\n//     GTEST_OS_WINDOWS_PHONE    - Windows Phone\n//     GTEST_OS_WINDOWS_RT       - Windows Store App/WinRT\n//   GTEST_OS_ZOS      - z/OS\n//\n// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the\n// most stable support.  Since core members of the Google Test project\n// don't have access to other platforms, support for them may be less\n// stable.  If you notice any problems on your platform, please notify\n// googletestframework@googlegroups.com (patches for fixing them are\n// even more welcome!).\n//\n// It is possible that none of the GTEST_OS_* macros are defined.\n\n// Feature-indicating macros\n// -------------------------\n//\n// Macros indicating which Google Test features are available (a macro\n// is defined to 1 if the corresponding feature is supported;\n// otherwise UNDEFINED -- it's never defined to 0.).  Google Test\n// defines these macros automatically.  Code outside Google Test MUST\n// NOT define them.\n//\n// These macros are public so that portable tests can be written.\n// Such tests typically surround code using a feature with an #if\n// which controls that code.  
For example:\n//\n// #if GTEST_HAS_DEATH_TEST\n//   EXPECT_DEATH(DoSomethingDeadly());\n// #endif\n//\n//   GTEST_HAS_COMBINE      - the Combine() function (for value-parameterized\n//                            tests)\n//   GTEST_HAS_DEATH_TEST   - death tests\n//   GTEST_HAS_PARAM_TEST   - value-parameterized tests\n//   GTEST_HAS_TYPED_TEST   - typed tests\n//   GTEST_HAS_TYPED_TEST_P - type-parameterized tests\n//   GTEST_IS_THREADSAFE    - Google Test is thread-safe.\n//   GTEST_USES_POSIX_RE    - enhanced POSIX regex is used. Do not confuse with\n//                            GTEST_HAS_POSIX_RE (see above) which users can\n//                            define themselves.\n//   GTEST_USES_SIMPLE_RE   - our own simple regex is used;\n//                            the above two are mutually exclusive.\n//   GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().\n\n// Misc public macros\n// ------------------\n//\n//   GTEST_FLAG(flag_name)  - references the variable corresponding to\n//                            the given Google Test flag.\n\n// Internal utilities\n// ------------------\n//\n// The following macros and utilities are for Google Test's INTERNAL\n// use only.  
Code outside Google Test MUST NOT USE THEM DIRECTLY.\n//\n// Macros for basic C++ coding:\n//   GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.\n//   GTEST_ATTRIBUTE_UNUSED_  - declares that a class' instances or a\n//                              variable don't have to be used.\n//   GTEST_DISALLOW_ASSIGN_   - disables operator=.\n//   GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.\n//   GTEST_MUST_USE_RESULT_   - declares that a function's result must be used.\n//   GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is\n//                                        suppressed (constant conditional).\n//   GTEST_INTENTIONAL_CONST_COND_POP_  - finish code section where MSVC C4127\n//                                        is suppressed.\n//\n// C++11 feature wrappers:\n//\n//   testing::internal::move  - portability wrapper for std::move.\n//\n// Synchronization:\n//   Mutex, MutexLock, ThreadLocal, GetThreadCount()\n//                            - synchronization primitives.\n//\n// Template meta programming:\n//   is_pointer     - as in TR1; needed on Symbian and IBM XL C/C++ only.\n//   IteratorTraits - partial implementation of std::iterator_traits, which\n//                    is not available in libCstd when compiled with Sun C++.\n//\n// Smart pointers:\n//   scoped_ptr     - as in TR2.\n//\n// Regular expressions:\n//   RE             - a simple regular expression class using the POSIX\n//                    Extended Regular Expression syntax on UNIX-like\n//                    platforms, or a reduced regular exception syntax on\n//                    other platforms, including Windows.\n//\n// Logging:\n//   GTEST_LOG_()   - logs messages at the specified severity level.\n//   LogToStderr()  - directs all log messages to stderr.\n//   FlushInfoLog() - flushes informational log messages.\n//\n// Stdout and stderr capturing:\n//   CaptureStdout()     - starts capturing stdout.\n//   GetCapturedStdout() - 
stops capturing stdout and returns the captured\n//                         string.\n//   CaptureStderr()     - starts capturing stderr.\n//   GetCapturedStderr() - stops capturing stderr and returns the captured\n//                         string.\n//\n// Integer types:\n//   TypeWithSize   - maps an integer to a int type.\n//   Int32, UInt32, Int64, UInt64, TimeInMillis\n//                  - integers of known sizes.\n//   BiggestInt     - the biggest signed integer type.\n//\n// Command-line utilities:\n//   GTEST_DECLARE_*()  - declares a flag.\n//   GTEST_DEFINE_*()   - defines a flag.\n//   GetInjectableArgvs() - returns the command line as a vector of strings.\n//\n// Environment variable utilities:\n//   GetEnv()             - gets the value of an environment variable.\n//   BoolFromGTestEnv()   - parses a bool environment variable.\n//   Int32FromGTestEnv()  - parses an Int32 environment variable.\n//   StringFromGTestEnv() - parses a string environment variable.\n\n#include <ctype.h>   // for isspace, etc\n#include <stddef.h>  // for ptrdiff_t\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#ifndef _WIN32_WCE\n# include <sys/types.h>\n# include <sys/stat.h>\n#endif  // !_WIN32_WCE\n\n#if defined __APPLE__\n# include <AvailabilityMacros.h>\n# include <TargetConditionals.h>\n#endif\n\n#include <algorithm>  // NOLINT\n#include <iostream>  // NOLINT\n#include <sstream>  // NOLINT\n#include <string>  // NOLINT\n#include <utility>\n#include <vector>  // NOLINT\n\n// Copyright 2015, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following 
disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file defines the GTEST_OS_* macro.\n// It is separate from gtest-port.h so that custom/gtest-port.h can include it.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_\n\n// Determines the platform on which Google Test is compiled.\n#ifdef __CYGWIN__\n# define GTEST_OS_CYGWIN 1\n#elif defined __SYMBIAN32__\n# define GTEST_OS_SYMBIAN 1\n#elif defined _WIN32\n# define GTEST_OS_WINDOWS 1\n# ifdef _WIN32_WCE\n#  define GTEST_OS_WINDOWS_MOBILE 1\n# elif defined(__MINGW__) || defined(__MINGW32__)\n#  define GTEST_OS_WINDOWS_MINGW 1\n# elif defined(WINAPI_FAMILY)\n#  include <winapifamily.h>\n#  if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)\n#   define GTEST_OS_WINDOWS_DESKTOP 1\n#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)\n#   define 
GTEST_OS_WINDOWS_PHONE 1\n#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)\n#   define GTEST_OS_WINDOWS_RT 1\n#  else\n    // WINAPI_FAMILY defined but no known partition matched.\n    // Default to desktop.\n#   define GTEST_OS_WINDOWS_DESKTOP 1\n#  endif\n# else\n#  define GTEST_OS_WINDOWS_DESKTOP 1\n# endif  // _WIN32_WCE\n#elif defined __APPLE__\n# define GTEST_OS_MAC 1\n# if TARGET_OS_IPHONE\n#  define GTEST_OS_IOS 1\n# endif\n#elif defined __FreeBSD__\n# define GTEST_OS_FREEBSD 1\n#elif defined __linux__\n# define GTEST_OS_LINUX 1\n# if defined __ANDROID__\n#  define GTEST_OS_LINUX_ANDROID 1\n# endif\n#elif defined __MVS__\n# define GTEST_OS_ZOS 1\n#elif defined(__sun) && defined(__SVR4)\n# define GTEST_OS_SOLARIS 1\n#elif defined(_AIX)\n# define GTEST_OS_AIX 1\n#elif defined(__hpux)\n# define GTEST_OS_HPUX 1\n#elif defined __native_client__\n# define GTEST_OS_NACL 1\n#elif defined __OpenBSD__\n# define GTEST_OS_OPENBSD 1\n#elif defined __QNX__\n# define GTEST_OS_QNX 1\n#endif  // __CYGWIN__\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_\n// Copyright 2015, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Injection point for custom user configurations.\n// The following macros can be defined:\n//\n//   Flag related macros:\n//     GTEST_FLAG(flag_name)\n//     GTEST_USE_OWN_FLAGFILE_FLAG_  - Define to 0 when the system provides its\n//                                     own flagfile flag parsing.\n//     GTEST_DECLARE_bool_(name)\n//     GTEST_DECLARE_int32_(name)\n//     GTEST_DECLARE_string_(name)\n//     GTEST_DEFINE_bool_(name, default_val, doc)\n//     GTEST_DEFINE_int32_(name, default_val, doc)\n//     GTEST_DEFINE_string_(name, default_val, doc)\n//\n//   Test filtering:\n//     GTEST_TEST_FILTER_ENV_VAR_ - The name of an environment variable that\n//                                  will be used if --GTEST_FLAG(test_filter)\n//                                  is not provided.\n//\n//   Logging:\n//     GTEST_LOG_(severity)\n//     GTEST_CHECK_(condition)\n//     Functions LogToStderr() and FlushInfoLog() have to be provided too.\n//\n//   Threading:\n//     GTEST_HAS_NOTIFICATION_ - Enabled if Notification 
is already provided.\n//     GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ - Enabled if Mutex and ThreadLocal are\n//                                         already provided.\n//     Must also provide GTEST_DECLARE_STATIC_MUTEX_(mutex) and\n//     GTEST_DEFINE_STATIC_MUTEX_(mutex)\n//\n//     GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)\n//     GTEST_LOCK_EXCLUDED_(locks)\n//\n// ** Custom implementation starts here **\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_\n\n#if !defined(GTEST_DEV_EMAIL_)\n# define GTEST_DEV_EMAIL_ \"googletestframework@@googlegroups.com\"\n# define GTEST_FLAG_PREFIX_ \"gtest_\"\n# define GTEST_FLAG_PREFIX_DASH_ \"gtest-\"\n# define GTEST_FLAG_PREFIX_UPPER_ \"GTEST_\"\n# define GTEST_NAME_ \"Google Test\"\n# define GTEST_PROJECT_URL_ \"http://code.google.com/p/googletest/\"\n#endif  // !defined(GTEST_DEV_EMAIL_)\n\n#if !defined(GTEST_INIT_GOOGLE_TEST_NAME_)\n# define GTEST_INIT_GOOGLE_TEST_NAME_ \"testing::InitGoogleTest\"\n#endif  // !defined(GTEST_INIT_GOOGLE_TEST_NAME_)\n\n// Determines the version of gcc that is used to compile this.\n#ifdef __GNUC__\n// 40302 means version 4.3.2.\n# define GTEST_GCC_VER_ \\\n    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)\n#endif  // __GNUC__\n\n// Macros for disabling Microsoft Visual C++ warnings.\n//\n//   GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385)\n//   /* code that triggers warnings C4800 and C4385 */\n//   GTEST_DISABLE_MSC_WARNINGS_POP_()\n#if _MSC_VER >= 1500\n# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \\\n    __pragma(warning(push))                        \\\n    __pragma(warning(disable: warnings))\n# define GTEST_DISABLE_MSC_WARNINGS_POP_()          \\\n    __pragma(warning(pop))\n#else\n// Older versions of MSVC don't have __pragma.\n# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings)\n# define GTEST_DISABLE_MSC_WARNINGS_POP_()\n#endif\n\n#ifndef 
GTEST_LANG_CXX11\n// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when\n// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a\n// value for __cplusplus, and recent versions of clang, gcc, and\n// probably other compilers set that too in C++11 mode.\n# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L\n// Compiling in at least C++11 mode.\n#  define GTEST_LANG_CXX11 1\n# else\n#  define GTEST_LANG_CXX11 0\n# endif\n#endif\n\n// Distinct from C++11 language support, some environments don't provide\n// proper C++11 library support. Notably, it's possible to build in\n// C++11 mode when targeting Mac OS X 10.6, which has an old libstdc++\n// with no C++11 support.\n//\n// libstdc++ has sufficient C++11 support as of GCC 4.6.0, __GLIBCXX__\n// 20110325, but maintenance releases in the 4.4 and 4.5 series followed\n// this date, so check for those versions by their date stamps.\n// https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html#abi.versioning\n#if GTEST_LANG_CXX11 && \\\n    (!defined(__GLIBCXX__) || ( \\\n        __GLIBCXX__ >= 20110325ul &&  /* GCC >= 4.6.0 */ \\\n        /* Blacklist of patch releases of older branches: */ \\\n        __GLIBCXX__ != 20110416ul &&  /* GCC 4.4.6 */ \\\n        __GLIBCXX__ != 20120313ul &&  /* GCC 4.4.7 */ \\\n        __GLIBCXX__ != 20110428ul &&  /* GCC 4.5.3 */ \\\n        __GLIBCXX__ != 20120702ul))   /* GCC 4.5.4 */\n# define GTEST_STDLIB_CXX11 1\n#endif\n\n// Only use C++11 library features if the library provides them.\n#if GTEST_STDLIB_CXX11\n# define GTEST_HAS_STD_BEGIN_AND_END_ 1\n# define GTEST_HAS_STD_FORWARD_LIST_ 1\n# define GTEST_HAS_STD_FUNCTION_ 1\n# define GTEST_HAS_STD_INITIALIZER_LIST_ 1\n# define GTEST_HAS_STD_MOVE_ 1\n# define GTEST_HAS_STD_SHARED_PTR_ 1\n# define GTEST_HAS_STD_TYPE_TRAITS_ 1\n# define GTEST_HAS_STD_UNIQUE_PTR_ 1\n#endif\n\n// C++11 specifies that <tuple> provides std::tuple.\n// Some platforms still might not have it, however.\n#if GTEST_LANG_CXX11\n# define 
GTEST_HAS_STD_TUPLE_ 1\n# if defined(__clang__)\n// Inspired by http://clang.llvm.org/docs/LanguageExtensions.html#__has_include\n#  if defined(__has_include) && !__has_include(<tuple>)\n#   undef GTEST_HAS_STD_TUPLE_\n#  endif\n# elif defined(_MSC_VER)\n// Inspired by boost/config/stdlib/dinkumware.hpp\n#  if defined(_CPPLIB_VER) && _CPPLIB_VER < 520\n#   undef GTEST_HAS_STD_TUPLE_\n#  endif\n# elif defined(__GLIBCXX__)\n// Inspired by boost/config/stdlib/libstdcpp3.hpp,\n// http://gcc.gnu.org/gcc-4.2/changes.html and\n// http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt01ch01.html#manual.intro.status.standard.200x\n#  if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)\n#   undef GTEST_HAS_STD_TUPLE_\n#  endif\n# endif\n#endif\n\n// Brings in definitions for functions used in the testing::internal::posix\n// namespace (read, write, close, chdir, isatty, stat). We do not currently\n// use them on Windows Mobile.\n#if GTEST_OS_WINDOWS\n# if !GTEST_OS_WINDOWS_MOBILE\n#  include <direct.h>\n#  include <io.h>\n# endif\n// In order to avoid having to include <windows.h>, use forward declaration\n// assuming CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION.\n// This assumption is verified by\n// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION.\nstruct _RTL_CRITICAL_SECTION;\n#else\n// This assumes that non-Windows OSes provide unistd.h. 
For OSes where this\n// is not the case, we need to include headers that provide the functions\n// mentioned above.\n# include <unistd.h>\n# include <strings.h>\n#endif  // GTEST_OS_WINDOWS\n\n#if GTEST_OS_LINUX_ANDROID\n// Used to define __ANDROID_API__ matching the target NDK API level.\n#  include <android/api-level.h>  // NOLINT\n#endif\n\n// Defines this to true iff Google Test can use POSIX regular expressions.\n#ifndef GTEST_HAS_POSIX_RE\n# if GTEST_OS_LINUX_ANDROID\n// On Android, <regex.h> is only available starting with Gingerbread.\n#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)\n# else\n#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)\n# endif\n#endif\n\n#if GTEST_USES_PCRE\n// The appropriate headers have already been included.\n\n#elif GTEST_HAS_POSIX_RE\n\n// On some platforms, <regex.h> needs someone to define size_t, and\n// won't compile otherwise.  We can #include it here as we already\n// included <stdlib.h>, which is guaranteed to define size_t through\n// <stddef.h>.\n# include <regex.h>  // NOLINT\n\n# define GTEST_USES_POSIX_RE 1\n\n#elif GTEST_OS_WINDOWS\n\n// <regex.h> is not available on Windows.  Use our own simple regex\n// implementation instead.\n# define GTEST_USES_SIMPLE_RE 1\n\n#else\n\n// <regex.h> may not be available on this platform.  
Use our own\n// simple regex implementation instead.\n# define GTEST_USES_SIMPLE_RE 1\n\n#endif  // GTEST_USES_PCRE\n\n#ifndef GTEST_HAS_EXCEPTIONS\n// The user didn't tell us whether exceptions are enabled, so we need\n// to figure it out.\n# if defined(_MSC_VER) || defined(__BORLANDC__)\n// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS\n// macro to enable exceptions, so we'll do the same.\n// Assumes that exceptions are enabled by default.\n#  ifndef _HAS_EXCEPTIONS\n#   define _HAS_EXCEPTIONS 1\n#  endif  // _HAS_EXCEPTIONS\n#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS\n# elif defined(__clang__)\n// clang defines __EXCEPTIONS iff exceptions are enabled before clang 220714,\n// but iff cleanups are enabled after that. In Obj-C++ files, there can be\n// cleanups for ObjC exceptions which also need cleanups, even if C++ exceptions\n// are disabled. clang has __has_feature(cxx_exceptions) which checks for C++\n// exceptions starting at clang r206352, but which checked for cleanups prior to\n// that. To reliably check for C++ exception availability with clang, check for\n// __EXCEPTIONS && __has_feature(cxx_exceptions).\n#  define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))\n# elif defined(__GNUC__) && __EXCEPTIONS\n// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.\n#  define GTEST_HAS_EXCEPTIONS 1\n# elif defined(__SUNPRO_CC)\n// Sun Pro CC supports exceptions.  However, there is no compile-time way of\n// detecting whether they are enabled or not.  Therefore, we assume that\n// they are enabled unless the user tells us otherwise.\n#  define GTEST_HAS_EXCEPTIONS 1\n# elif defined(__IBMCPP__) && __EXCEPTIONS\n// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.\n#  define GTEST_HAS_EXCEPTIONS 1\n# elif defined(__HP_aCC)\n// Exception handling is in effect by default in HP aCC compiler. 
It has to\n// be turned of by +noeh compiler option if desired.\n#  define GTEST_HAS_EXCEPTIONS 1\n# else\n// For other compilers, we assume exceptions are disabled to be\n// conservative.\n#  define GTEST_HAS_EXCEPTIONS 0\n# endif  // defined(_MSC_VER) || defined(__BORLANDC__)\n#endif  // GTEST_HAS_EXCEPTIONS\n\n#if !defined(GTEST_HAS_STD_STRING)\n// Even though we don't use this macro any longer, we keep it in case\n// some clients still depend on it.\n# define GTEST_HAS_STD_STRING 1\n#elif !GTEST_HAS_STD_STRING\n// The user told us that ::std::string isn't available.\n# error \"Google Test cannot be used where ::std::string isn't available.\"\n#endif  // !defined(GTEST_HAS_STD_STRING)\n\n#ifndef GTEST_HAS_GLOBAL_STRING\n// The user didn't tell us whether ::string is available, so we need\n// to figure it out.\n\n# define GTEST_HAS_GLOBAL_STRING 0\n\n#endif  // GTEST_HAS_GLOBAL_STRING\n\n#ifndef GTEST_HAS_STD_WSTRING\n// The user didn't tell us whether ::std::wstring is available, so we need\n// to figure it out.\n// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring\n//   is available.\n\n// Cygwin 1.7 and below doesn't support ::std::wstring.\n// Solaris' libc++ doesn't support it either.  
Android has\n// no support for it at least as recent as Froyo (2.2).\n# define GTEST_HAS_STD_WSTRING \\\n    (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))\n\n#endif  // GTEST_HAS_STD_WSTRING\n\n#ifndef GTEST_HAS_GLOBAL_WSTRING\n// The user didn't tell us whether ::wstring is available, so we need\n// to figure it out.\n# define GTEST_HAS_GLOBAL_WSTRING \\\n    (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n// Determines whether RTTI is available.\n#ifndef GTEST_HAS_RTTI\n// The user didn't tell us whether RTTI is enabled, so we need to\n// figure it out.\n\n# ifdef _MSC_VER\n\n#  ifdef _CPPRTTI  // MSVC defines this macro iff RTTI is enabled.\n#   define GTEST_HAS_RTTI 1\n#  else\n#   define GTEST_HAS_RTTI 0\n#  endif\n\n// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.\n# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)\n\n#  ifdef __GXX_RTTI\n// When building against STLport with the Android NDK and with\n// -frtti -fno-exceptions, the build fails at link time with undefined\n// references to __cxa_bad_typeid. Note sure if STL or toolchain bug,\n// so disable RTTI when detected.\n#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \\\n       !defined(__EXCEPTIONS)\n#    define GTEST_HAS_RTTI 0\n#   else\n#    define GTEST_HAS_RTTI 1\n#   endif  // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS\n#  else\n#   define GTEST_HAS_RTTI 0\n#  endif  // __GXX_RTTI\n\n// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends\n// using has_feature instead. 
has_feature(cxx_rtti) is supported since 2.7, the\n// first version with C++ support.\n# elif defined(__clang__)\n\n#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)\n\n// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if\n// both the typeid and dynamic_cast features are present.\n# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)\n\n#  ifdef __RTTI_ALL__\n#   define GTEST_HAS_RTTI 1\n#  else\n#   define GTEST_HAS_RTTI 0\n#  endif\n\n# else\n\n// For all other compilers, we assume RTTI is enabled.\n#  define GTEST_HAS_RTTI 1\n\n# endif  // _MSC_VER\n\n#endif  // GTEST_HAS_RTTI\n\n// It's this header's responsibility to #include <typeinfo> when RTTI\n// is enabled.\n#if GTEST_HAS_RTTI\n# include <typeinfo>\n#endif\n\n// Determines whether Google Test can use the pthreads library.\n#ifndef GTEST_HAS_PTHREAD\n// The user didn't tell us explicitly, so we make reasonable assumptions about\n// which platforms have pthreads support.\n//\n// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0\n// to your compiler flags.\n# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \\\n    || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NACL)\n#endif  // GTEST_HAS_PTHREAD\n\n#if GTEST_HAS_PTHREAD\n// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is\n// true.\n# include <pthread.h>  // NOLINT\n\n// For timespec and nanosleep, used below.\n# include <time.h>  // NOLINT\n#endif\n\n// Determines if hash_map/hash_set are available.\n// Only used for testing against those containers.\n#if !defined(GTEST_HAS_HASH_MAP_)\n# if _MSC_VER\n#  define GTEST_HAS_HASH_MAP_ 1  // Indicates that hash_map is available.\n#  define GTEST_HAS_HASH_SET_ 1  // Indicates that hash_set is available.\n# endif  // _MSC_VER\n#endif  // !defined(GTEST_HAS_HASH_MAP_)\n\n// Determines whether Google Test can use tr1/tuple.  
You can define\n// this macro to 0 to prevent Google Test from using tuple (any\n// feature depending on tuple with be disabled in this mode).\n#ifndef GTEST_HAS_TR1_TUPLE\n# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)\n// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.\n#  define GTEST_HAS_TR1_TUPLE 0\n# else\n// The user didn't tell us not to do it, so we assume it's OK.\n#  define GTEST_HAS_TR1_TUPLE 1\n# endif\n#endif  // GTEST_HAS_TR1_TUPLE\n\n// Determines whether Google Test's own tr1 tuple implementation\n// should be used.\n#ifndef GTEST_USE_OWN_TR1_TUPLE\n// The user didn't tell us, so we need to figure it out.\n\n// We use our own TR1 tuple if we aren't sure the user has an\n// implementation of it already.  At this time, libstdc++ 4.0.0+ and\n// MSVC 2010 are the only mainstream standard libraries that come\n// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler\n// pretends to be GCC by defining __GNUC__ and friends, but cannot\n// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1\n// tuple in a 323 MB Feature Pack download, which we cannot assume the\n// user has.  QNX's QCC compiler is a modified GCC but it doesn't\n// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,\n// and it can be used with some compilers that define __GNUC__.\n# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \\\n      && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600\n#  define GTEST_ENV_HAS_TR1_TUPLE_ 1\n# endif\n\n// C++11 specifies that <tuple> provides std::tuple. 
Use that if gtest is used\n// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6\n// can build with clang but need to use gcc4.2's libstdc++).\n# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)\n#  define GTEST_ENV_HAS_STD_TUPLE_ 1\n# endif\n\n# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_\n#  define GTEST_USE_OWN_TR1_TUPLE 0\n# else\n#  define GTEST_USE_OWN_TR1_TUPLE 1\n# endif\n\n#endif  // GTEST_USE_OWN_TR1_TUPLE\n\n// To avoid conditional compilation everywhere, we make it\n// gtest-port.h's responsibility to #include the header implementing\n// tuple.\n#if GTEST_HAS_STD_TUPLE_\n# include <tuple>  // IWYU pragma: export\n# define GTEST_TUPLE_NAMESPACE_ ::std\n#endif  // GTEST_HAS_STD_TUPLE_\n\n// We include tr1::tuple even if std::tuple is available to define printers for\n// them.\n#if GTEST_HAS_TR1_TUPLE\n# ifndef GTEST_TUPLE_NAMESPACE_\n#  define GTEST_TUPLE_NAMESPACE_ ::std::tr1\n# endif  // GTEST_TUPLE_NAMESPACE_\n\n# if GTEST_USE_OWN_TR1_TUPLE\n// This file was GENERATED by command:\n//     pump.py gtest-tuple.h.pump\n// DO NOT EDIT BY HAND!!!\n\n// Copyright 2009 Google Inc.\n// All Rights Reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n// Implements a subset of TR1 tuple needed by Google Test and Google Mock.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_\n\n#include <utility>  // For ::std::pair.\n\n// The compiler used in Symbian has a bug that prevents us from declaring the\n// tuple template as a friend (it complains that tuple is redefined).  This\n// hack bypasses the bug by declaring the members that should otherwise be\n// private as public.\n// Sun Studio versions < 12 also have the above bug.\n#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)\n# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:\n#else\n# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \\\n    template <GTEST_10_TYPENAMES_(U)> friend class tuple; \\\n   private:\n#endif\n\n// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict\n// with our own definitions. 
Therefore using our own tuple does not work on\n// those compilers.\n#if defined(_MSC_VER) && _MSC_VER >= 1600  /* 1600 is Visual Studio 2010 */\n# error \"gtest's tuple doesn't compile on Visual Studio 2010 or later. \\\nGTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers.\"\n#endif\n\n// GTEST_n_TUPLE_(T) is the type of an n-tuple.\n#define GTEST_0_TUPLE_(T) tuple<>\n#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \\\n    void, void, void>\n#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \\\n    void, void, void>\n#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \\\n    void, void, void>\n#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \\\n    void, void, void>\n#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \\\n    void, void, void>\n#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \\\n    void, void, void>\n#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \\\n    void, void, void>\n#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \\\n    T##7, void, void>\n#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \\\n    T##7, T##8, void>\n#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \\\n    T##7, T##8, T##9>\n\n// GTEST_n_TYPENAMES_(T) declares a list of n typenames.\n#define GTEST_0_TYPENAMES_(T)\n#define GTEST_1_TYPENAMES_(T) typename T##0\n#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1\n#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2\n#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3\n#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4\n#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4, typename 
T##5\n#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4, typename T##5, typename T##6\n#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4, typename T##5, typename T##6, typename T##7\n#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4, typename T##5, typename T##6, \\\n    typename T##7, typename T##8\n#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \\\n    typename T##3, typename T##4, typename T##5, typename T##6, \\\n    typename T##7, typename T##8, typename T##9\n\n// In theory, defining stuff in the ::std namespace is undefined\n// behavior.  We can do this as we are playing the role of a standard\n// library vendor.\nnamespace std {\nnamespace tr1 {\n\ntemplate <typename T0 = void, typename T1 = void, typename T2 = void,\n    typename T3 = void, typename T4 = void, typename T5 = void,\n    typename T6 = void, typename T7 = void, typename T8 = void,\n    typename T9 = void>\nclass tuple;\n\n// Anything in namespace gtest_internal is Google Test's INTERNAL\n// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.\nnamespace gtest_internal {\n\n// ByRef<T>::type is T if T is a reference; otherwise it's const T&.\ntemplate <typename T>\nstruct ByRef { typedef const T& type; };  // NOLINT\ntemplate <typename T>\nstruct ByRef<T&> { typedef T& type; };  // NOLINT\n\n// A handy wrapper for ByRef.\n#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type\n\n// AddRef<T>::type is T if T is a reference; otherwise it's T&.  
This\n// is the same as tr1::add_reference<T>::type.\ntemplate <typename T>\nstruct AddRef { typedef T& type; };  // NOLINT\ntemplate <typename T>\nstruct AddRef<T&> { typedef T& type; };  // NOLINT\n\n// A handy wrapper for AddRef.\n#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type\n\n// A helper for implementing get<k>().\ntemplate <int k> class Get;\n\n// A helper for implementing tuple_element<k, T>.  kIndexValid is true\n// iff k < the number of fields in tuple type T.\ntemplate <bool kIndexValid, int kIndex, class Tuple>\nstruct TupleElement;\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 0, GTEST_10_TUPLE_(T) > {\n  typedef T0 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 1, GTEST_10_TUPLE_(T) > {\n  typedef T1 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 2, GTEST_10_TUPLE_(T) > {\n  typedef T2 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 3, GTEST_10_TUPLE_(T) > {\n  typedef T3 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 4, GTEST_10_TUPLE_(T) > {\n  typedef T4 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 5, GTEST_10_TUPLE_(T) > {\n  typedef T5 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 6, GTEST_10_TUPLE_(T) > {\n  typedef T6 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 7, GTEST_10_TUPLE_(T) > {\n  typedef T7 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 8, GTEST_10_TUPLE_(T) > {\n  typedef T8 type;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct TupleElement<true, 9, GTEST_10_TUPLE_(T) > {\n  typedef T9 type;\n};\n\n}  // namespace gtest_internal\n\ntemplate <>\nclass tuple<> {\n public:\n  tuple() {}\n  tuple(const tuple& /* t */)  {}\n  tuple& operator=(const tuple& /* t */) { return *this; }\n};\n\ntemplate <GTEST_1_TYPENAMES_(T)>\nclass GTEST_1_TUPLE_(T) {\n public:\n  template 
<int k> friend class gtest_internal::Get;\n\n  tuple() : f0_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}\n\n  tuple(const tuple& t) : f0_(t.f0_) {}\n\n  template <GTEST_1_TYPENAMES_(U)>\n  tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_1_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_1_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_1_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    return *this;\n  }\n\n  T0 f0_;\n};\n\ntemplate <GTEST_2_TYPENAMES_(T)>\nclass GTEST_2_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),\n      f1_(f1) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}\n\n  template <GTEST_2_TYPENAMES_(U)>\n  tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}\n  template <typename U0, typename U1>\n  tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_2_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_2_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n  template <typename U0, typename U1>\n  tuple& operator=(const ::std::pair<U0, U1>& p) {\n    f0_ = p.first;\n    f1_ = p.second;\n    return *this;\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_2_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n};\n\ntemplate <GTEST_3_TYPENAMES_(T)>\nclass GTEST_3_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}\n\n  tuple(const tuple& t) 
: f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}\n\n  template <GTEST_3_TYPENAMES_(U)>\n  tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_3_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_3_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_3_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n};\n\ntemplate <GTEST_4_TYPENAMES_(T)>\nclass GTEST_4_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),\n      f3_(f3) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}\n\n  template <GTEST_4_TYPENAMES_(U)>\n  tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_4_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_4_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_4_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n};\n\ntemplate <GTEST_5_TYPENAMES_(T)>\nclass GTEST_5_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,\n      GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), 
f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_) {}\n\n  template <GTEST_5_TYPENAMES_(U)>\n  tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_5_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_5_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_5_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n};\n\ntemplate <GTEST_6_TYPENAMES_(T)>\nclass GTEST_6_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,\n      GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),\n      f5_(f5) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_), f5_(t.f5_) {}\n\n  template <GTEST_6_TYPENAMES_(U)>\n  tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_6_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_6_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_6_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    f5_ = t.f5_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n  T5 f5_;\n};\n\ntemplate <GTEST_7_TYPENAMES_(T)>\nclass GTEST_7_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  
tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,\n      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),\n      f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}\n\n  template <GTEST_7_TYPENAMES_(U)>\n  tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_7_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_7_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_7_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    f5_ = t.f5_;\n    f6_ = t.f6_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n  T5 f5_;\n  T6 f6_;\n};\n\ntemplate <GTEST_8_TYPENAMES_(T)>\nclass GTEST_8_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,\n      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,\n      GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),\n      f5_(f5), f6_(f6), f7_(f7) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}\n\n  template <GTEST_8_TYPENAMES_(U)>\n  tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}\n\n  tuple& operator=(const 
tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_8_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_8_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_8_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    f5_ = t.f5_;\n    f6_ = t.f6_;\n    f7_ = t.f7_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n  T5 f5_;\n  T6 f6_;\n  T7 f7_;\n};\n\ntemplate <GTEST_9_TYPENAMES_(T)>\nclass GTEST_9_TUPLE_(T) {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,\n      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,\n      GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),\n      f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}\n\n  template <GTEST_9_TYPENAMES_(U)>\n  tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_9_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_9_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_9_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    f5_ = t.f5_;\n    f6_ = t.f6_;\n    f7_ = t.f7_;\n    f8_ = t.f8_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n  T5 f5_;\n  T6 f6_;\n  T7 f7_;\n  T8 
f8_;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nclass tuple {\n public:\n  template <int k> friend class gtest_internal::Get;\n\n  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),\n      f9_() {}\n\n  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,\n      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,\n      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,\n      GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),\n      f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}\n\n  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),\n      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}\n\n  template <GTEST_10_TYPENAMES_(U)>\n  tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),\n      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),\n      f9_(t.f9_) {}\n\n  tuple& operator=(const tuple& t) { return CopyFrom(t); }\n\n  template <GTEST_10_TYPENAMES_(U)>\n  tuple& operator=(const GTEST_10_TUPLE_(U)& t) {\n    return CopyFrom(t);\n  }\n\n  GTEST_DECLARE_TUPLE_AS_FRIEND_\n\n  template <GTEST_10_TYPENAMES_(U)>\n  tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {\n    f0_ = t.f0_;\n    f1_ = t.f1_;\n    f2_ = t.f2_;\n    f3_ = t.f3_;\n    f4_ = t.f4_;\n    f5_ = t.f5_;\n    f6_ = t.f6_;\n    f7_ = t.f7_;\n    f8_ = t.f8_;\n    f9_ = t.f9_;\n    return *this;\n  }\n\n  T0 f0_;\n  T1 f1_;\n  T2 f2_;\n  T3 f3_;\n  T4 f4_;\n  T5 f5_;\n  T6 f6_;\n  T7 f7_;\n  T8 f8_;\n  T9 f9_;\n};\n\n// 6.1.3.2 Tuple creation functions.\n\n// Known limitations: we don't support passing an\n// std::tr1::reference_wrapper<T> to make_tuple().  
And we don't\n// implement tie().\n\ninline tuple<> make_tuple() { return tuple<>(); }\n\ntemplate <GTEST_1_TYPENAMES_(T)>\ninline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {\n  return GTEST_1_TUPLE_(T)(f0);\n}\n\ntemplate <GTEST_2_TYPENAMES_(T)>\ninline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {\n  return GTEST_2_TUPLE_(T)(f0, f1);\n}\n\ntemplate <GTEST_3_TYPENAMES_(T)>\ninline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {\n  return GTEST_3_TUPLE_(T)(f0, f1, f2);\n}\n\ntemplate <GTEST_4_TYPENAMES_(T)>\ninline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3) {\n  return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);\n}\n\ntemplate <GTEST_5_TYPENAMES_(T)>\ninline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3, const T4& f4) {\n  return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);\n}\n\ntemplate <GTEST_6_TYPENAMES_(T)>\ninline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3, const T4& f4, const T5& f5) {\n  return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);\n}\n\ntemplate <GTEST_7_TYPENAMES_(T)>\ninline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3, const T4& f4, const T5& f5, const T6& f6) {\n  return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);\n}\n\ntemplate <GTEST_8_TYPENAMES_(T)>\ninline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {\n  return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);\n}\n\ntemplate <GTEST_9_TYPENAMES_(T)>\ninline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,\n    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,\n    const T8& f8) {\n  return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);\n}\n\ntemplate <GTEST_10_TYPENAMES_(T)>\ninline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, 
const T2& f2,\n    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,\n    const T8& f8, const T9& f9) {\n  return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);\n}\n\n// 6.1.3.3 Tuple helper classes.\n\ntemplate <typename Tuple> struct tuple_size;\n\ntemplate <GTEST_0_TYPENAMES_(T)>\nstruct tuple_size<GTEST_0_TUPLE_(T) > {\n  static const int value = 0;\n};\n\ntemplate <GTEST_1_TYPENAMES_(T)>\nstruct tuple_size<GTEST_1_TUPLE_(T) > {\n  static const int value = 1;\n};\n\ntemplate <GTEST_2_TYPENAMES_(T)>\nstruct tuple_size<GTEST_2_TUPLE_(T) > {\n  static const int value = 2;\n};\n\ntemplate <GTEST_3_TYPENAMES_(T)>\nstruct tuple_size<GTEST_3_TUPLE_(T) > {\n  static const int value = 3;\n};\n\ntemplate <GTEST_4_TYPENAMES_(T)>\nstruct tuple_size<GTEST_4_TUPLE_(T) > {\n  static const int value = 4;\n};\n\ntemplate <GTEST_5_TYPENAMES_(T)>\nstruct tuple_size<GTEST_5_TUPLE_(T) > {\n  static const int value = 5;\n};\n\ntemplate <GTEST_6_TYPENAMES_(T)>\nstruct tuple_size<GTEST_6_TUPLE_(T) > {\n  static const int value = 6;\n};\n\ntemplate <GTEST_7_TYPENAMES_(T)>\nstruct tuple_size<GTEST_7_TUPLE_(T) > {\n  static const int value = 7;\n};\n\ntemplate <GTEST_8_TYPENAMES_(T)>\nstruct tuple_size<GTEST_8_TUPLE_(T) > {\n  static const int value = 8;\n};\n\ntemplate <GTEST_9_TYPENAMES_(T)>\nstruct tuple_size<GTEST_9_TUPLE_(T) > {\n  static const int value = 9;\n};\n\ntemplate <GTEST_10_TYPENAMES_(T)>\nstruct tuple_size<GTEST_10_TUPLE_(T) > {\n  static const int value = 10;\n};\n\ntemplate <int k, class Tuple>\nstruct tuple_element {\n  typedef typename gtest_internal::TupleElement<\n      k < (tuple_size<Tuple>::value), k, Tuple>::type type;\n};\n\n#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type\n\n// 6.1.3.4 Element access.\n\nnamespace gtest_internal {\n\ntemplate <>\nclass Get<0> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))\n  Field(Tuple& t) { return t.f0_; }  // 
NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))\n  ConstField(const Tuple& t) { return t.f0_; }\n};\n\ntemplate <>\nclass Get<1> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))\n  Field(Tuple& t) { return t.f1_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))\n  ConstField(const Tuple& t) { return t.f1_; }\n};\n\ntemplate <>\nclass Get<2> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))\n  Field(Tuple& t) { return t.f2_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))\n  ConstField(const Tuple& t) { return t.f2_; }\n};\n\ntemplate <>\nclass Get<3> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))\n  Field(Tuple& t) { return t.f3_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))\n  ConstField(const Tuple& t) { return t.f3_; }\n};\n\ntemplate <>\nclass Get<4> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))\n  Field(Tuple& t) { return t.f4_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))\n  ConstField(const Tuple& t) { return t.f4_; }\n};\n\ntemplate <>\nclass Get<5> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))\n  Field(Tuple& t) { return t.f5_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))\n  ConstField(const Tuple& t) { return t.f5_; }\n};\n\ntemplate <>\nclass Get<6> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))\n  Field(Tuple& t) { return t.f6_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))\n  ConstField(const Tuple& t) { return t.f6_; 
}\n};\n\ntemplate <>\nclass Get<7> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))\n  Field(Tuple& t) { return t.f7_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))\n  ConstField(const Tuple& t) { return t.f7_; }\n};\n\ntemplate <>\nclass Get<8> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))\n  Field(Tuple& t) { return t.f8_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))\n  ConstField(const Tuple& t) { return t.f8_; }\n};\n\ntemplate <>\nclass Get<9> {\n public:\n  template <class Tuple>\n  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))\n  Field(Tuple& t) { return t.f9_; }  // NOLINT\n\n  template <class Tuple>\n  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))\n  ConstField(const Tuple& t) { return t.f9_; }\n};\n\n}  // namespace gtest_internal\n\ntemplate <int k, GTEST_10_TYPENAMES_(T)>\nGTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))\nget(GTEST_10_TUPLE_(T)& t) {\n  return gtest_internal::Get<k>::Field(t);\n}\n\ntemplate <int k, GTEST_10_TYPENAMES_(T)>\nGTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k,  GTEST_10_TUPLE_(T)))\nget(const GTEST_10_TUPLE_(T)& t) {\n  return gtest_internal::Get<k>::ConstField(t);\n}\n\n// 6.1.3.5 Relational operators\n\n// We only implement == and !=, as we don't have a need for the rest yet.\n\nnamespace gtest_internal {\n\n// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the\n// first k fields of t1 equals the first k fields of t2.\n// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if\n// k1 != k2.\ntemplate <int kSize1, int kSize2>\nstruct SameSizeTuplePrefixComparator;\n\ntemplate <>\nstruct SameSizeTuplePrefixComparator<0, 0> {\n  template <class Tuple1, class Tuple2>\n  static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {\n    return true;\n  }\n};\n\ntemplate <int k>\nstruct 
SameSizeTuplePrefixComparator<k, k> {\n  template <class Tuple1, class Tuple2>\n  static bool Eq(const Tuple1& t1, const Tuple2& t2) {\n    return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&\n        ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);\n  }\n};\n\n}  // namespace gtest_internal\n\ntemplate <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>\ninline bool operator==(const GTEST_10_TUPLE_(T)& t,\n                       const GTEST_10_TUPLE_(U)& u) {\n  return gtest_internal::SameSizeTuplePrefixComparator<\n      tuple_size<GTEST_10_TUPLE_(T) >::value,\n      tuple_size<GTEST_10_TUPLE_(U) >::value>::Eq(t, u);\n}\n\ntemplate <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>\ninline bool operator!=(const GTEST_10_TUPLE_(T)& t,\n                       const GTEST_10_TUPLE_(U)& u) { return !(t == u); }\n\n// 6.1.4 Pairs.\n// Unimplemented.\n\n}  // namespace tr1\n}  // namespace std\n\n#undef GTEST_0_TUPLE_\n#undef GTEST_1_TUPLE_\n#undef GTEST_2_TUPLE_\n#undef GTEST_3_TUPLE_\n#undef GTEST_4_TUPLE_\n#undef GTEST_5_TUPLE_\n#undef GTEST_6_TUPLE_\n#undef GTEST_7_TUPLE_\n#undef GTEST_8_TUPLE_\n#undef GTEST_9_TUPLE_\n#undef GTEST_10_TUPLE_\n\n#undef GTEST_0_TYPENAMES_\n#undef GTEST_1_TYPENAMES_\n#undef GTEST_2_TYPENAMES_\n#undef GTEST_3_TYPENAMES_\n#undef GTEST_4_TYPENAMES_\n#undef GTEST_5_TYPENAMES_\n#undef GTEST_6_TYPENAMES_\n#undef GTEST_7_TYPENAMES_\n#undef GTEST_8_TYPENAMES_\n#undef GTEST_9_TYPENAMES_\n#undef GTEST_10_TYPENAMES_\n\n#undef GTEST_DECLARE_TUPLE_AS_FRIEND_\n#undef GTEST_BY_REF_\n#undef GTEST_ADD_REF_\n#undef GTEST_TUPLE_ELEMENT_\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_\n# elif GTEST_ENV_HAS_STD_TUPLE_\n#  include <tuple>\n// C++11 puts its tuple into the ::std namespace rather than\n// ::std::tr1.  
gtest expects tuple to live in ::std::tr1, so put it there.\n// This causes undefined behavior, but supported compilers react in\n// the way we intend.\nnamespace std {\nnamespace tr1 {\nusing ::std::get;\nusing ::std::make_tuple;\nusing ::std::tuple;\nusing ::std::tuple_element;\nusing ::std::tuple_size;\n}\n}\n\n# elif GTEST_OS_SYMBIAN\n\n// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to\n// use STLport's tuple implementation, which unfortunately doesn't\n// work as the copy of STLport distributed with Symbian is incomplete.\n// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to\n// use its own tuple implementation.\n#  ifdef BOOST_HAS_TR1_TUPLE\n#   undef BOOST_HAS_TR1_TUPLE\n#  endif  // BOOST_HAS_TR1_TUPLE\n\n// This prevents <boost/tr1/detail/config.hpp>, which defines\n// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.\n#  define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED\n#  include <tuple>  // IWYU pragma: export  // NOLINT\n\n# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)\n// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header.  This does\n// not conform to the TR1 spec, which requires the header to be <tuple>.\n\n#  if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302\n// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,\n// which is #included by <tr1/tuple>, to not compile when RTTI is\n// disabled.  _TR1_FUNCTIONAL is the header guard for\n// <tr1/functional>.  
Hence the following #define is a hack to prevent\n// <tr1/functional> from being included.\n#   define _TR1_FUNCTIONAL 1\n#   include <tr1/tuple>\n#   undef _TR1_FUNCTIONAL  // Allows the user to #include\n                        // <tr1/functional> if he chooses to.\n#  else\n#   include <tr1/tuple>  // NOLINT\n#  endif  // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302\n\n# else\n// If the compiler is not GCC 4.0+, we assume the user is using a\n// spec-conforming TR1 implementation.\n#  include <tuple>  // IWYU pragma: export  // NOLINT\n# endif  // GTEST_USE_OWN_TR1_TUPLE\n\n#endif  // GTEST_HAS_TR1_TUPLE\n\n// Determines whether clone(2) is supported.\n// Usually it will only be available on Linux, excluding\n// Linux on the Itanium architecture.\n// Also see http://linux.die.net/man/2/clone.\n#ifndef GTEST_HAS_CLONE\n// The user didn't tell us, so we need to figure it out.\n\n# if GTEST_OS_LINUX && !defined(__ia64__)\n#  if GTEST_OS_LINUX_ANDROID\n// On Android, clone() is only available on ARM starting with Gingerbread.\n#    if defined(__arm__) && __ANDROID_API__ >= 9\n#     define GTEST_HAS_CLONE 1\n#    else\n#     define GTEST_HAS_CLONE 0\n#    endif\n#  else\n#   define GTEST_HAS_CLONE 1\n#  endif\n# else\n#  define GTEST_HAS_CLONE 0\n# endif  // GTEST_OS_LINUX && !defined(__ia64__)\n\n#endif  // GTEST_HAS_CLONE\n\n// Determines whether to support stream redirection. 
This is used to test\n// output correctness and to implement death tests.\n#ifndef GTEST_HAS_STREAM_REDIRECTION\n// By default, we assume that stream redirection is supported on all\n// platforms except known mobile ones.\n# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || \\\n    GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT\n#  define GTEST_HAS_STREAM_REDIRECTION 0\n# else\n#  define GTEST_HAS_STREAM_REDIRECTION 1\n# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN\n#endif  // GTEST_HAS_STREAM_REDIRECTION\n\n// Determines whether to support death tests.\n// Google Test does not support death tests for VC 7.1 and earlier as\n// abort() in a VC 7.1 application compiled as GUI in debug config\n// pops up a dialog window that cannot be suppressed programmatically.\n#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \\\n     (GTEST_OS_MAC && !GTEST_OS_IOS) || \\\n     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \\\n     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \\\n     GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)\n# define GTEST_HAS_DEATH_TEST 1\n#endif\n\n// We don't support MSVC 7.1 with exceptions disabled now.  Therefore\n// all the compilers we care about are adequate for supporting\n// value-parameterized tests.\n#define GTEST_HAS_PARAM_TEST 1\n\n// Determines whether to support type-driven tests.\n\n// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,\n// Sun Pro CC, IBM Visual Age, and HP aCC support.\n#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \\\n    defined(__IBMCPP__) || defined(__HP_aCC)\n# define GTEST_HAS_TYPED_TEST 1\n# define GTEST_HAS_TYPED_TEST_P 1\n#endif\n\n// Determines whether to support Combine(). This only makes sense when\n// value-parameterized tests are enabled.  
The implementation doesn't\n// work on Sun Studio since it doesn't understand templated conversion\n// operators.\n#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)\n# define GTEST_HAS_COMBINE 1\n#endif\n\n// Determines whether the system compiler uses UTF-16 for encoding wide strings.\n#define GTEST_WIDE_STRING_USES_UTF16_ \\\n    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)\n\n// Determines whether test results can be streamed to a socket.\n#if GTEST_OS_LINUX\n# define GTEST_CAN_STREAM_RESULTS_ 1\n#endif\n\n// Defines some utility macros.\n\n// The GNU compiler emits a warning if nested \"if\" statements are followed by\n// an \"else\" statement and braces are not used to explicitly disambiguate the\n// \"else\" binding.  This leads to problems with code like:\n//\n//   if (gate)\n//     ASSERT_*(condition) << \"Some message\";\n//\n// The \"switch (0) case 0:\" idiom is used to suppress this.\n#ifdef __INTEL_COMPILER\n# define GTEST_AMBIGUOUS_ELSE_BLOCKER_\n#else\n# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default:  // NOLINT\n#endif\n\n// Use this annotation at the end of a struct/class definition to\n// prevent the compiler from optimizing away instances that are never\n// used.  This is useful when all interesting logic happens inside the\n// c'tor and / or d'tor.  Example:\n//\n//   struct Foo {\n//     Foo() { ... 
}\n//   } GTEST_ATTRIBUTE_UNUSED_;\n//\n// Also use it after a variable or parameter declaration to tell the\n// compiler the variable/parameter does not have to be used.\n#if defined(__GNUC__) && !defined(COMPILER_ICC)\n# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))\n#elif defined(__clang__)\n# if __has_attribute(unused)\n#  define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))\n# endif\n#endif\n#ifndef GTEST_ATTRIBUTE_UNUSED_\n# define GTEST_ATTRIBUTE_UNUSED_\n#endif\n\n// A macro to disallow operator=\n// This should be used in the private: declarations for a class.\n#define GTEST_DISALLOW_ASSIGN_(type)\\\n  void operator=(type const &)\n\n// A macro to disallow copy constructor and operator=\n// This should be used in the private: declarations for a class.\n#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\\\n  type(type const &);\\\n  GTEST_DISALLOW_ASSIGN_(type)\n\n// Tell the compiler to warn about unused return values for functions declared\n// with this macro.  The macro should be used on function declarations\n// following the argument list:\n//\n//   Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;\n#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)\n# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))\n#else\n# define GTEST_MUST_USE_RESULT_\n#endif  // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC\n\n// MS C++ compiler emits warning when a conditional expression is compile time\n// constant. In some contexts this warning is false positive and needs to be\n// suppressed. Use the following two macros in such cases:\n//\n// GTEST_INTENTIONAL_CONST_COND_PUSH_()\n// while (true) {\n// GTEST_INTENTIONAL_CONST_COND_POP_()\n// }\n# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \\\n    GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)\n# define GTEST_INTENTIONAL_CONST_COND_POP_() \\\n    GTEST_DISABLE_MSC_WARNINGS_POP_()\n\n// Determine whether the compiler supports Microsoft's Structured Exception\n// Handling. 
 This is supported by several Windows compilers but generally\n// does not exist on any other system.\n#ifndef GTEST_HAS_SEH\n// The user didn't tell us, so we need to figure it out.\n\n# if defined(_MSC_VER) || defined(__BORLANDC__)\n// These two compilers are known to support SEH.\n#  define GTEST_HAS_SEH 1\n# else\n// Assume no SEH.\n#  define GTEST_HAS_SEH 0\n# endif\n\n#define GTEST_IS_THREADSAFE \\\n    (GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ \\\n     || (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) \\\n     || GTEST_HAS_PTHREAD)\n\n#endif  // GTEST_HAS_SEH\n\n#ifdef _MSC_VER\n# if GTEST_LINKED_AS_SHARED_LIBRARY\n#  define GTEST_API_ __declspec(dllimport)\n# elif GTEST_CREATE_SHARED_LIBRARY\n#  define GTEST_API_ __declspec(dllexport)\n# endif\n#elif __GNUC__ >= 4 || defined(__clang__)\n# define GTEST_API_ __attribute__((visibility (\"default\")))\n#endif // _MSC_VER\n\n#ifndef GTEST_API_\n# define GTEST_API_\n#endif\n\n#ifdef __GNUC__\n// Ask the compiler to never inline a given function.\n# define GTEST_NO_INLINE_ __attribute__((noinline))\n#else\n# define GTEST_NO_INLINE_\n#endif\n\n// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.\n#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)\n# define GTEST_HAS_CXXABI_H_ 1\n#else\n# define GTEST_HAS_CXXABI_H_ 0\n#endif\n\n// A function level attribute to disable checking for use of uninitialized\n// memory when built with MemorySanitizer.\n#if defined(__clang__)\n# if __has_feature(memory_sanitizer)\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \\\n       __attribute__((no_sanitize_memory))\n# else\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_\n# endif  // __has_feature(memory_sanitizer)\n#else\n# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_\n#endif  // __clang__\n\n// A function level attribute to disable AddressSanitizer instrumentation.\n#if defined(__clang__)\n# if __has_feature(address_sanitizer)\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \\\n       
__attribute__((no_sanitize_address))\n# else\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\n# endif  // __has_feature(address_sanitizer)\n#else\n# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\n#endif  // __clang__\n\n// A function level attribute to disable ThreadSanitizer instrumentation.\n#if defined(__clang__)\n# if __has_feature(thread_sanitizer)\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \\\n       __attribute__((no_sanitize_thread))\n# else\n#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_\n# endif  // __has_feature(thread_sanitizer)\n#else\n# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_\n#endif  // __clang__\n\nnamespace testing {\n\nclass Message;\n\n#if defined(GTEST_TUPLE_NAMESPACE_)\n// Import tuple and friends into the ::testing namespace.\n// It is part of our interface, having them in ::testing allows us to change\n// their types as needed.\nusing GTEST_TUPLE_NAMESPACE_::get;\nusing GTEST_TUPLE_NAMESPACE_::make_tuple;\nusing GTEST_TUPLE_NAMESPACE_::tuple;\nusing GTEST_TUPLE_NAMESPACE_::tuple_size;\nusing GTEST_TUPLE_NAMESPACE_::tuple_element;\n#endif  // defined(GTEST_TUPLE_NAMESPACE_)\n\nnamespace internal {\n\n// A secret type that Google Test users don't know about.  It has no\n// definition on purpose.  Therefore it's impossible to create a\n// Secret object, which is what we want.\nclass Secret;\n\n// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time\n// expression is true. For example, you could use it to verify the\n// size of a static array:\n//\n//   GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,\n//                         names_incorrect_size);\n//\n// or to make sure a struct is smaller than a certain size:\n//\n//   GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);\n//\n// The second argument to the macro is the name of the variable. 
If\n// the expression is false, most compilers will issue a warning/error\n// containing the name of the variable.\n\n#if GTEST_LANG_CXX11\n# define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg)\n#else  // !GTEST_LANG_CXX11\ntemplate <bool>\n  struct CompileAssert {\n};\n\n# define GTEST_COMPILE_ASSERT_(expr, msg) \\\n  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \\\n      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_\n#endif  // !GTEST_LANG_CXX11\n\n// Implementation details of GTEST_COMPILE_ASSERT_:\n//\n// (In C++11, we simply use static_assert instead of the following)\n//\n// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1\n//   elements (and thus is invalid) when the expression is false.\n//\n// - The simpler definition\n//\n//    #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]\n//\n//   does not work, as gcc supports variable-length arrays whose sizes\n//   are determined at run-time (this is gcc's extension and not part\n//   of the C++ standard).  As a result, gcc fails to reject the\n//   following code with the simple definition:\n//\n//     int foo;\n//     GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is\n//                                      // not a compile-time constant.\n//\n// - By using the type CompileAssert<(bool(expr))>, we ensures that\n//   expr is a compile-time constant.  (Template arguments must be\n//   determined at compile-time.)\n//\n// - The outter parentheses in CompileAssert<(bool(expr))> are necessary\n//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written\n//\n//     CompileAssert<bool(expr)>\n//\n//   instead, these compilers will refuse to compile\n//\n//     GTEST_COMPILE_ASSERT_(5 > 0, some_message);\n//\n//   (They seem to think the \">\" in \"5 > 0\" marks the end of the\n//   template argument list.)\n//\n// - The array size is (bool(expr) ? 
1 : -1), instead of simply\n//\n//     ((expr) ? 1 : -1).\n//\n//   This is to avoid running into a bug in MS VC 7.1, which\n//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.\n\n// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.\n//\n// This template is declared, but intentionally undefined.\ntemplate <typename T1, typename T2>\nstruct StaticAssertTypeEqHelper;\n\ntemplate <typename T>\nstruct StaticAssertTypeEqHelper<T, T> {\n  enum { value = true };\n};\n\n// Evaluates to the number of elements in 'array'.\n#define GTEST_ARRAY_SIZE_(array) (sizeof(array) / sizeof(array[0]))\n\n#if GTEST_HAS_GLOBAL_STRING\ntypedef ::string string;\n#else\ntypedef ::std::string string;\n#endif  // GTEST_HAS_GLOBAL_STRING\n\n#if GTEST_HAS_GLOBAL_WSTRING\ntypedef ::wstring wstring;\n#elif GTEST_HAS_STD_WSTRING\ntypedef ::std::wstring wstring;\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n// A helper for suppressing warnings on constant condition.  It just\n// returns 'condition'.\nGTEST_API_ bool IsTrue(bool condition);\n\n// Defines scoped_ptr.\n\n// This implementation of scoped_ptr is PARTIAL - it only contains\n// enough stuff to satisfy Google Test's need.\ntemplate <typename T>\nclass scoped_ptr {\n public:\n  typedef T element_type;\n\n  explicit scoped_ptr(T* p = NULL) : ptr_(p) {}\n  ~scoped_ptr() { reset(); }\n\n  T& operator*() const { return *ptr_; }\n  T* operator->() const { return ptr_; }\n  T* get() const { return ptr_; }\n\n  T* release() {\n    T* const ptr = ptr_;\n    ptr_ = NULL;\n    return ptr;\n  }\n\n  void reset(T* p = NULL) {\n    if (p != ptr_) {\n      if (IsTrue(sizeof(T) > 0)) {  // Makes sure T is a complete type.\n        delete ptr_;\n      }\n      ptr_ = p;\n    }\n  }\n\n  friend void swap(scoped_ptr& a, scoped_ptr& b) {\n    using std::swap;\n    swap(a.ptr_, b.ptr_);\n  }\n\n private:\n  T* ptr_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);\n};\n\n// Defines RE.\n\n// A simple C++ wrapper for <regex.h>.  
It uses the POSIX Extended\n// Regular Expression syntax.\nclass GTEST_API_ RE {\n public:\n  // A copy constructor is required by the Standard to initialize object\n  // references from r-values.\n  RE(const RE& other) { Init(other.pattern()); }\n\n  // Constructs an RE from a string.\n  RE(const ::std::string& regex) { Init(regex.c_str()); }  // NOLINT\n\n#if GTEST_HAS_GLOBAL_STRING\n\n  RE(const ::string& regex) { Init(regex.c_str()); }  // NOLINT\n\n#endif  // GTEST_HAS_GLOBAL_STRING\n\n  RE(const char* regex) { Init(regex); }  // NOLINT\n  ~RE();\n\n  // Returns the string representation of the regex.\n  const char* pattern() const { return pattern_; }\n\n  // FullMatch(str, re) returns true iff regular expression re matches\n  // the entire str.\n  // PartialMatch(str, re) returns true iff regular expression re\n  // matches a substring of str (including str itself).\n  //\n  // TODO(wan@google.com): make FullMatch() and PartialMatch() work\n  // when str contains NUL characters.\n  static bool FullMatch(const ::std::string& str, const RE& re) {\n    return FullMatch(str.c_str(), re);\n  }\n  static bool PartialMatch(const ::std::string& str, const RE& re) {\n    return PartialMatch(str.c_str(), re);\n  }\n\n#if GTEST_HAS_GLOBAL_STRING\n\n  static bool FullMatch(const ::string& str, const RE& re) {\n    return FullMatch(str.c_str(), re);\n  }\n  static bool PartialMatch(const ::string& str, const RE& re) {\n    return PartialMatch(str.c_str(), re);\n  }\n\n#endif  // GTEST_HAS_GLOBAL_STRING\n\n  static bool FullMatch(const char* str, const RE& re);\n  static bool PartialMatch(const char* str, const RE& re);\n\n private:\n  void Init(const char* regex);\n\n  // We use a const char* instead of an std::string, as Google Test used to be\n  // used where std::string is not available.  
TODO(wan@google.com): change to\n  // std::string.\n  const char* pattern_;\n  bool is_valid_;\n\n#if GTEST_USES_POSIX_RE\n\n  regex_t full_regex_;     // For FullMatch().\n  regex_t partial_regex_;  // For PartialMatch().\n\n#else  // GTEST_USES_SIMPLE_RE\n\n  const char* full_pattern_;  // For FullMatch();\n\n#endif\n\n  GTEST_DISALLOW_ASSIGN_(RE);\n};\n\n// Formats a source file path and a line number as they would appear\n// in an error message from the compiler used to compile this code.\nGTEST_API_ ::std::string FormatFileLocation(const char* file, int line);\n\n// Formats a file location for compiler-independent XML output.\n// Although this function is not platform dependent, we put it next to\n// FormatFileLocation in order to contrast the two functions.\nGTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,\n                                                               int line);\n\n// Defines logging utilities:\n//   GTEST_LOG_(severity) - logs messages at the specified severity level. 
The\n//                          message itself is streamed into the macro.\n//   LogToStderr()  - directs all log messages to stderr.\n//   FlushInfoLog() - flushes informational log messages.\n\nenum GTestLogSeverity {\n  GTEST_INFO,\n  GTEST_WARNING,\n  GTEST_ERROR,\n  GTEST_FATAL\n};\n\n// Formats log entry severity, provides a stream object for streaming the\n// log message, and terminates the message with a newline when going out of\n// scope.\nclass GTEST_API_ GTestLog {\n public:\n  GTestLog(GTestLogSeverity severity, const char* file, int line);\n\n  // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.\n  ~GTestLog();\n\n  ::std::ostream& GetStream() { return ::std::cerr; }\n\n private:\n  const GTestLogSeverity severity_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);\n};\n\n#if !defined(GTEST_LOG_)\n\n# define GTEST_LOG_(severity) \\\n    ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \\\n                                  __FILE__, __LINE__).GetStream()\n\ninline void LogToStderr() {}\ninline void FlushInfoLog() { fflush(NULL); }\n\n#endif  // !defined(GTEST_LOG_)\n\n#if !defined(GTEST_CHECK_)\n// INTERNAL IMPLEMENTATION - DO NOT USE.\n//\n// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition\n// is not satisfied.\n//  Synopsys:\n//    GTEST_CHECK_(boolean_condition);\n//     or\n//    GTEST_CHECK_(boolean_condition) << \"Additional message\";\n//\n//    This checks the condition and if the condition is not satisfied\n//    it prints message about the condition violation, including the\n//    condition itself, plus additional message streamed into it, if any,\n//    and then it aborts the program. 
It aborts the program irrespective of\n//    whether it is built in the debug mode or not.\n# define GTEST_CHECK_(condition) \\\n    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\\n    if (::testing::internal::IsTrue(condition)) \\\n      ; \\\n    else \\\n      GTEST_LOG_(FATAL) << \"Condition \" #condition \" failed. \"\n#endif  // !defined(GTEST_CHECK_)\n\n// An all-mode assert to verify that the given POSIX-style function\n// call returns 0 (indicating success).  Known limitation: this\n// doesn't expand to a balanced 'if' statement, so enclose the macro\n// in {} if you need to use it as the only statement in an 'if'\n// branch.\n#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \\\n  if (const int gtest_error = (posix_call)) \\\n    GTEST_LOG_(FATAL) << #posix_call << \"failed with error \" \\\n                      << gtest_error\n\n#if GTEST_HAS_STD_MOVE_\nusing std::move;\n#else  // GTEST_HAS_STD_MOVE_\ntemplate <typename T>\nconst T& move(const T& t) {\n  return t;\n}\n#endif  // GTEST_HAS_STD_MOVE_\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Use ImplicitCast_ as a safe version of static_cast for upcasting in\n// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a\n// const Foo*).  When you use ImplicitCast_, the compiler checks that\n// the cast is safe.  Such explicit ImplicitCast_s are necessary in\n// surprisingly many situations where C++ demands an exact type match\n// instead of an argument type convertable to a target type.\n//\n// The syntax for using ImplicitCast_ is the same as for static_cast:\n//\n//   ImplicitCast_<ToType>(expr)\n//\n// ImplicitCast_ would have been part of the C++ standard library,\n// but the proposal was submitted too late.  It will probably make\n// its way into the language in the future.\n//\n// This relatively ugly name is intentional. It prevents clashes with\n// similar functions users may have (e.g., implicit_cast). 
The internal\n// namespace alone is not enough because the function can be found by ADL.\ntemplate<typename To>\ninline To ImplicitCast_(To x) { return x; }\n\n// When you upcast (that is, cast a pointer from type Foo to type\n// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts\n// always succeed.  When you downcast (that is, cast a pointer from\n// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because\n// how do you know the pointer is really of type SubclassOfFoo?  It\n// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,\n// when you downcast, you should use this macro.  In debug mode, we\n// use dynamic_cast<> to double-check the downcast is legal (we die\n// if it's not).  In normal mode, we do the efficient static_cast<>\n// instead.  Thus, it's important to test in debug mode to make sure\n// the cast is legal!\n//    This is the only place in the code we should use dynamic_cast<>.\n// In particular, you SHOULDN'T be using dynamic_cast<> in order to\n// do RTTI (eg code like this:\n//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);\n//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);\n// You should design the code some other way not to need this.\n//\n// This relatively ugly name is intentional. It prevents clashes with\n// similar functions users may have (e.g., down_cast). The internal\n// namespace alone is not enough because the function can be found by ADL.\ntemplate<typename To, typename From>  // use like this: DownCast_<T*>(foo);\ninline To DownCast_(From* f) {  // so we only accept pointers\n  // Ensures that To is a sub-type of From *.  
This test is here only\n  // for compile-time type checking, and has no overhead in an\n  // optimized build at run-time, as it will be optimized away\n  // completely.\n  GTEST_INTENTIONAL_CONST_COND_PUSH_()\n  if (false) {\n  GTEST_INTENTIONAL_CONST_COND_POP_()\n    const To to = NULL;\n    ::testing::internal::ImplicitCast_<From*>(to);\n  }\n\n#if GTEST_HAS_RTTI\n  // RTTI: debug mode only!\n  GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);\n#endif\n  return static_cast<To>(f);\n}\n\n// Downcasts the pointer of type Base to Derived.\n// Derived must be a subclass of Base. The parameter MUST\n// point to a class of type Derived, not any subclass of it.\n// When RTTI is available, the function performs a runtime\n// check to enforce this.\ntemplate <class Derived, class Base>\nDerived* CheckedDowncastToActualType(Base* base) {\n#if GTEST_HAS_RTTI\n  GTEST_CHECK_(typeid(*base) == typeid(Derived));\n#endif\n\n#if GTEST_HAS_DOWNCAST_\n  return ::down_cast<Derived*>(base);\n#elif GTEST_HAS_RTTI\n  return dynamic_cast<Derived*>(base);  // NOLINT\n#else\n  return static_cast<Derived*>(base);  // Poor man's downcast.\n#endif\n}\n\n#if GTEST_HAS_STREAM_REDIRECTION\n\n// Defines the stderr capturer:\n//   CaptureStdout     - starts capturing stdout.\n//   GetCapturedStdout - stops capturing stdout and returns the captured string.\n//   CaptureStderr     - starts capturing stderr.\n//   GetCapturedStderr - stops capturing stderr and returns the captured string.\n//\nGTEST_API_ void CaptureStdout();\nGTEST_API_ std::string GetCapturedStdout();\nGTEST_API_ void CaptureStderr();\nGTEST_API_ std::string GetCapturedStderr();\n\n#endif  // GTEST_HAS_STREAM_REDIRECTION\n\n// Returns a path to temporary directory.\nGTEST_API_ std::string TempDir();\n\n// Returns the size (in bytes) of a file.\nGTEST_API_ size_t GetFileSize(FILE* file);\n\n// Reads the entire content of a file as a string.\nGTEST_API_ std::string ReadEntireFile(FILE* file);\n\n// All command line 
arguments.\nGTEST_API_ const ::std::vector<testing::internal::string>& GetArgvs();\n\n#if GTEST_HAS_DEATH_TEST\n\nconst ::std::vector<testing::internal::string>& GetInjectableArgvs();\nvoid SetInjectableArgvs(const ::std::vector<testing::internal::string>*\n                             new_argvs);\n\n\n#endif  // GTEST_HAS_DEATH_TEST\n\n// Defines synchronization primitives.\n#if GTEST_IS_THREADSAFE\n# if GTEST_HAS_PTHREAD\n// Sleeps for (roughly) n milliseconds.  This function is only for testing\n// Google Test's own constructs.  Don't use it in user tests, either\n// directly or indirectly.\ninline void SleepMilliseconds(int n) {\n  const timespec time = {\n    0,                  // 0 seconds.\n    n * 1000L * 1000L,  // And n ms.\n  };\n  nanosleep(&time, NULL);\n}\n# endif  // GTEST_HAS_PTHREAD\n\n# if GTEST_HAS_NOTIFICATION_\n// Notification has already been imported into the namespace.\n// Nothing to do here.\n\n# elif GTEST_HAS_PTHREAD\n// Allows a controller thread to pause execution of newly created\n// threads until notified.  Instances of this class must be created\n// and destroyed in the controller thread.\n//\n// This class is only for testing Google Test's own constructs. Do not\n// use it in user tests, either directly or indirectly.\nclass Notification {\n public:\n  Notification() : notified_(false) {\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));\n  }\n  ~Notification() {\n    pthread_mutex_destroy(&mutex_);\n  }\n\n  // Notifies all threads created with this notification to start. Must\n  // be called from the controller thread.\n  void Notify() {\n    pthread_mutex_lock(&mutex_);\n    notified_ = true;\n    pthread_mutex_unlock(&mutex_);\n  }\n\n  // Blocks until the controller thread notifies. 
Must be called from a test\n  // thread.\n  void WaitForNotification() {\n    for (;;) {\n      pthread_mutex_lock(&mutex_);\n      const bool notified = notified_;\n      pthread_mutex_unlock(&mutex_);\n      if (notified)\n        break;\n      SleepMilliseconds(10);\n    }\n  }\n\n private:\n  pthread_mutex_t mutex_;\n  bool notified_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);\n};\n\n# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT\n\nGTEST_API_ void SleepMilliseconds(int n);\n\n// Provides leak-safe Windows kernel handle ownership.\n// Used in death tests and in threading support.\nclass GTEST_API_ AutoHandle {\n public:\n  // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to\n  // avoid including <windows.h> in this header file. Including <windows.h> is\n  // undesirable because it defines a lot of symbols and macros that tend to\n  // conflict with client code. This assumption is verified by\n  // WindowsTypesTest.HANDLEIsVoidStar.\n  typedef void* Handle;\n  AutoHandle();\n  explicit AutoHandle(Handle handle);\n\n  ~AutoHandle();\n\n  Handle Get() const;\n  void Reset();\n  void Reset(Handle handle);\n\n private:\n  // Returns true iff the handle is a valid handle object that can be closed.\n  bool IsCloseable() const;\n\n  Handle handle_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);\n};\n\n// Allows a controller thread to pause execution of newly created\n// threads until notified.  Instances of this class must be created\n// and destroyed in the controller thread.\n//\n// This class is only for testing Google Test's own constructs. 
Do not\n// use it in user tests, either directly or indirectly.\nclass GTEST_API_ Notification {\n public:\n  Notification();\n  void Notify();\n  void WaitForNotification();\n\n private:\n  AutoHandle event_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);\n};\n# endif  // GTEST_HAS_NOTIFICATION_\n\n// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD\n// defined, but we don't want to use MinGW's pthreads implementation, which\n// has conformance problems with some versions of the POSIX standard.\n# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW\n\n// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.\n// Consequently, it cannot select a correct instantiation of ThreadWithParam\n// in order to call its Run(). Introducing ThreadWithParamBase as a\n// non-templated base class for ThreadWithParam allows us to bypass this\n// problem.\nclass ThreadWithParamBase {\n public:\n  virtual ~ThreadWithParamBase() {}\n  virtual void Run() = 0;\n};\n\n// pthread_create() accepts a pointer to a function type with the C linkage.\n// According to the Standard (7.5/1), function types with different linkages\n// are different even if they are otherwise identical.  Some compilers (for\n// example, SunStudio) treat them as different types.  
Since class methods\n// cannot be defined with C-linkage we need to define a free C-function to\n// pass into pthread_create().\nextern \"C\" inline void* ThreadFuncWithCLinkage(void* thread) {\n  static_cast<ThreadWithParamBase*>(thread)->Run();\n  return NULL;\n}\n\n// Helper class for testing Google Test's multi-threading constructs.\n// To use it, write:\n//\n//   void ThreadFunc(int param) { /* Do things with param */ }\n//   Notification thread_can_start;\n//   ...\n//   // The thread_can_start parameter is optional; you can supply NULL.\n//   ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);\n//   thread_can_start.Notify();\n//\n// These classes are only for testing Google Test's own constructs. Do\n// not use them in user tests, either directly or indirectly.\ntemplate <typename T>\nclass ThreadWithParam : public ThreadWithParamBase {\n public:\n  typedef void UserThreadFunc(T);\n\n  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)\n      : func_(func),\n        param_(param),\n        thread_can_start_(thread_can_start),\n        finished_(false) {\n    ThreadWithParamBase* const base = this;\n    // The thread can be created only after all fields except thread_\n    // have been initialized.\n    GTEST_CHECK_POSIX_SUCCESS_(\n        pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));\n  }\n  ~ThreadWithParam() { Join(); }\n\n  void Join() {\n    if (!finished_) {\n      GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));\n      finished_ = true;\n    }\n  }\n\n  virtual void Run() {\n    if (thread_can_start_ != NULL)\n      thread_can_start_->WaitForNotification();\n    func_(param_);\n  }\n\n private:\n  UserThreadFunc* const func_;  // User-supplied thread function.\n  const T param_;  // User-supplied parameter to the thread function.\n  // When non-NULL, used to block execution until the controller thread\n  // notifies.\n  Notification* const thread_can_start_;\n  bool finished_;  // true iff we 
know that the thread function has finished.\n  pthread_t thread_;  // The native thread object.\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);\n};\n# endif  // !GTEST_OS_WINDOWS && GTEST_HAS_PTHREAD ||\n         // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_\n\n# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_\n// Mutex and ThreadLocal have already been imported into the namespace.\n// Nothing to do here.\n\n# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT\n\n// Mutex implements mutex on Windows platforms.  It is used in conjunction\n// with class MutexLock:\n//\n//   Mutex mutex;\n//   ...\n//   MutexLock lock(&mutex);  // Acquires the mutex and releases it at the\n//                            // end of the current scope.\n//\n// A static Mutex *must* be defined or declared using one of the following\n// macros:\n//   GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);\n//   GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);\n//\n// (A non-static Mutex is defined/declared in the usual way).\nclass GTEST_API_ Mutex {\n public:\n  enum MutexType { kStatic = 0, kDynamic = 1 };\n  // We rely on kStaticMutex being 0 as it is to what the linker initializes\n  // type_ in static mutexes.  critical_section_ will be initialized lazily\n  // in ThreadSafeLazyInit().\n  enum StaticConstructorSelector { kStaticMutex = 0 };\n\n  // This constructor intentionally does nothing.  It relies on type_ being\n  // statically initialized to 0 (effectively setting it to kStatic) and on\n  // ThreadSafeLazyInit() to lazily initialize the rest of the members.\n  explicit Mutex(StaticConstructorSelector /*dummy*/) {}\n\n  Mutex();\n  ~Mutex();\n\n  void Lock();\n\n  void Unlock();\n\n  // Does nothing if the current thread holds the mutex. 
Otherwise, crashes\n  // with high probability.\n  void AssertHeld();\n\n private:\n  // Initializes owner_thread_id_ and critical_section_ in static mutexes.\n  void ThreadSafeLazyInit();\n\n  // Per http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx,\n  // we assume that 0 is an invalid value for thread IDs.\n  unsigned int owner_thread_id_;\n\n  // For static mutexes, we rely on these members being initialized to zeros\n  // by the linker.\n  MutexType type_;\n  long critical_section_init_phase_;  // NOLINT\n  _RTL_CRITICAL_SECTION* critical_section_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);\n};\n\n# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \\\n    extern ::testing::internal::Mutex mutex\n\n# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \\\n    ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex)\n\n// We cannot name this class MutexLock because the ctor declaration would\n// conflict with a macro named MutexLock, which is defined on some\n// platforms. That macro is used as a defensive measure to prevent against\n// inadvertent misuses of MutexLock like \"MutexLock(&mu)\" rather than\n// \"MutexLock l(&mu)\".  Hence the typedef trick below.\nclass GTestMutexLock {\n public:\n  explicit GTestMutexLock(Mutex* mutex)\n      : mutex_(mutex) { mutex_->Lock(); }\n\n  ~GTestMutexLock() { mutex_->Unlock(); }\n\n private:\n  Mutex* const mutex_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);\n};\n\ntypedef GTestMutexLock MutexLock;\n\n// Base class for ValueHolder<T>.  Allows a caller to hold and delete a value\n// without knowing its type.\nclass ThreadLocalValueHolderBase {\n public:\n  virtual ~ThreadLocalValueHolderBase() {}\n};\n\n// Provides a way for a thread to send notifications to a ThreadLocal\n// regardless of its parameter type.\nclass ThreadLocalBase {\n public:\n  // Creates a new ValueHolder<T> object holding a default value passed to\n  // this ThreadLocal<T>'s constructor and returns it.  
It is the caller's\n  // responsibility not to call this when the ThreadLocal<T> instance already\n  // has a value on the current thread.\n  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0;\n\n protected:\n  ThreadLocalBase() {}\n  virtual ~ThreadLocalBase() {}\n\n private:\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase);\n};\n\n// Maps a thread to a set of ThreadLocals that have values instantiated on that\n// thread and notifies them when the thread exits.  A ThreadLocal instance is\n// expected to persist until all threads it has values on have terminated.\nclass GTEST_API_ ThreadLocalRegistry {\n public:\n  // Registers thread_local_instance as having value on the current thread.\n  // Returns a value that can be used to identify the thread from other threads.\n  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(\n      const ThreadLocalBase* thread_local_instance);\n\n  // Invoked when a ThreadLocal instance is destroyed.\n  static void OnThreadLocalDestroyed(\n      const ThreadLocalBase* thread_local_instance);\n};\n\nclass GTEST_API_ ThreadWithParamBase {\n public:\n  void Join();\n\n protected:\n  class Runnable {\n   public:\n    virtual ~Runnable() {}\n    virtual void Run() = 0;\n  };\n\n  ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start);\n  virtual ~ThreadWithParamBase();\n\n private:\n  AutoHandle thread_;\n};\n\n// Helper class for testing Google Test's multi-threading constructs.\ntemplate <typename T>\nclass ThreadWithParam : public ThreadWithParamBase {\n public:\n  typedef void UserThreadFunc(T);\n\n  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)\n      : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {\n  }\n  virtual ~ThreadWithParam() {}\n\n private:\n  class RunnableImpl : public Runnable {\n   public:\n    RunnableImpl(UserThreadFunc* func, T param)\n        : func_(func),\n          param_(param) {\n    }\n    virtual 
~RunnableImpl() {}\n    virtual void Run() {\n      func_(param_);\n    }\n\n   private:\n    UserThreadFunc* const func_;\n    const T param_;\n\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);\n  };\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);\n};\n\n// Implements thread-local storage on Windows systems.\n//\n//   // Thread 1\n//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.\n//\n//   // Thread 2\n//   tl.set(150);  // Changes the value for thread 2 only.\n//   EXPECT_EQ(150, tl.get());\n//\n//   // Thread 1\n//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.\n//   tl.set(200);\n//   EXPECT_EQ(200, tl.get());\n//\n// The template type argument T must have a public copy constructor.\n// In addition, the default ThreadLocal constructor requires T to have\n// a public default constructor.\n//\n// The users of a ThreadLocal instance have to make sure that all but one\n// threads (including the main one) using that instance have exited before\n// destroying it. Otherwise, the per-thread objects managed for them by the\n// ThreadLocal instance are not guaranteed to be destroyed on all platforms.\n//\n// Google Test only uses global ThreadLocal objects.  That means they\n// will die after main() has returned.  
Therefore, no per-thread\n// object managed by Google Test will be leaked as long as all threads\n// using Google Test have exited when main() returns.\ntemplate <typename T>\nclass ThreadLocal : public ThreadLocalBase {\n public:\n  ThreadLocal() : default_factory_(new DefaultValueHolderFactory()) {}\n  explicit ThreadLocal(const T& value)\n      : default_factory_(new InstanceValueHolderFactory(value)) {}\n\n  ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }\n\n  T* pointer() { return GetOrCreateValue(); }\n  const T* pointer() const { return GetOrCreateValue(); }\n  const T& get() const { return *pointer(); }\n  void set(const T& value) { *pointer() = value; }\n\n private:\n  // Holds a value of T.  Can be deleted via its base class without the caller\n  // knowing the type of T.\n  class ValueHolder : public ThreadLocalValueHolderBase {\n   public:\n    ValueHolder() : value_() {}\n    explicit ValueHolder(const T& value) : value_(value) {}\n\n    T* pointer() { return &value_; }\n\n   private:\n    T value_;\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);\n  };\n\n\n  T* GetOrCreateValue() const {\n    return static_cast<ValueHolder*>(\n        ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();\n  }\n\n  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {\n    return default_factory_->MakeNewHolder();\n  }\n\n  class ValueHolderFactory {\n   public:\n    ValueHolderFactory() {}\n    virtual ~ValueHolderFactory() {}\n    virtual ValueHolder* MakeNewHolder() const = 0;\n\n   private:\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);\n  };\n\n  class DefaultValueHolderFactory : public ValueHolderFactory {\n   public:\n    DefaultValueHolderFactory() {}\n    virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }\n\n   private:\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);\n  };\n\n  class InstanceValueHolderFactory : public ValueHolderFactory {\n   public:\n    
explicit InstanceValueHolderFactory(const T& value) : value_(value) {}\n    virtual ValueHolder* MakeNewHolder() const {\n      return new ValueHolder(value_);\n    }\n\n   private:\n    const T value_;  // The value for each thread.\n\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);\n  };\n\n  scoped_ptr<ValueHolderFactory> default_factory_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);\n};\n\n# elif GTEST_HAS_PTHREAD\n\n// MutexBase and Mutex implement mutex on pthreads-based platforms.\nclass MutexBase {\n public:\n  // Acquires this mutex.\n  void Lock() {\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));\n    owner_ = pthread_self();\n    has_owner_ = true;\n  }\n\n  // Releases this mutex.\n  void Unlock() {\n    // Since the lock is being released the owner_ field should no longer be\n    // considered valid. We don't protect writing to has_owner_ here, as it's\n    // the caller's responsibility to ensure that the current thread holds the\n    // mutex when this is called.\n    has_owner_ = false;\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));\n  }\n\n  // Does nothing if the current thread holds the mutex. Otherwise, crashes\n  // with high probability.\n  void AssertHeld() const {\n    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))\n        << \"The current thread is not holding the mutex @\" << this;\n  }\n\n  // A static mutex may be used before main() is entered.  It may even\n  // be used before the dynamic initialization stage.  Therefore we\n  // must be able to initialize a static mutex object at link time.\n  // This means MutexBase has to be a POD and its member variables\n  // have to be public.\n public:\n  pthread_mutex_t mutex_;  // The underlying pthread mutex.\n  // has_owner_ indicates whether the owner_ field below contains a valid thread\n  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). 
All\n  // accesses to the owner_ field should be protected by a check of this field.\n  // An alternative might be to memset() owner_ to all zeros, but there's no\n  // guarantee that a zero'd pthread_t is necessarily invalid or even different\n  // from pthread_self().\n  bool has_owner_;\n  pthread_t owner_;  // The thread holding the mutex.\n};\n\n// Forward-declares a static mutex.\n#  define GTEST_DECLARE_STATIC_MUTEX_(mutex) \\\n     extern ::testing::internal::MutexBase mutex\n\n// Defines and statically (i.e. at link time) initializes a static mutex.\n#  define GTEST_DEFINE_STATIC_MUTEX_(mutex) \\\n     ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false, pthread_t() }\n\n// The Mutex class can only be used for mutexes created at runtime. It\n// shares its API with MutexBase otherwise.\nclass Mutex : public MutexBase {\n public:\n  Mutex() {\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));\n    has_owner_ = false;\n  }\n  ~Mutex() {\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));\n  }\n\n private:\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);\n};\n\n// We cannot name this class MutexLock because the ctor declaration would\n// conflict with a macro named MutexLock, which is defined on some\n// platforms. That macro is used as a defensive measure to prevent against\n// inadvertent misuses of MutexLock like \"MutexLock(&mu)\" rather than\n// \"MutexLock l(&mu)\".  Hence the typedef trick below.\nclass GTestMutexLock {\n public:\n  explicit GTestMutexLock(MutexBase* mutex)\n      : mutex_(mutex) { mutex_->Lock(); }\n\n  ~GTestMutexLock() { mutex_->Unlock(); }\n\n private:\n  MutexBase* const mutex_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);\n};\n\ntypedef GTestMutexLock MutexLock;\n\n// Helpers for ThreadLocal.\n\n// pthread_key_create() requires DeleteThreadLocalValue() to have\n// C-linkage.  Therefore it cannot be templatized to access\n// ThreadLocal<T>.  
Hence the need for class\n// ThreadLocalValueHolderBase.\nclass ThreadLocalValueHolderBase {\n public:\n  virtual ~ThreadLocalValueHolderBase() {}\n};\n\n// Called by pthread to delete thread-local data stored by\n// pthread_setspecific().\nextern \"C\" inline void DeleteThreadLocalValue(void* value_holder) {\n  delete static_cast<ThreadLocalValueHolderBase*>(value_holder);\n}\n\n// Implements thread-local storage on pthreads-based systems.\ntemplate <typename T>\nclass ThreadLocal {\n public:\n  ThreadLocal()\n      : key_(CreateKey()), default_factory_(new DefaultValueHolderFactory()) {}\n  explicit ThreadLocal(const T& value)\n      : key_(CreateKey()),\n        default_factory_(new InstanceValueHolderFactory(value)) {}\n\n  ~ThreadLocal() {\n    // Destroys the managed object for the current thread, if any.\n    DeleteThreadLocalValue(pthread_getspecific(key_));\n\n    // Releases resources associated with the key.  This will *not*\n    // delete managed objects for other threads.\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));\n  }\n\n  T* pointer() { return GetOrCreateValue(); }\n  const T* pointer() const { return GetOrCreateValue(); }\n  const T& get() const { return *pointer(); }\n  void set(const T& value) { *pointer() = value; }\n\n private:\n  // Holds a value of type T.\n  class ValueHolder : public ThreadLocalValueHolderBase {\n   public:\n    ValueHolder() : value_() {}\n    explicit ValueHolder(const T& value) : value_(value) {}\n\n    T* pointer() { return &value_; }\n\n   private:\n    T value_;\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);\n  };\n\n  static pthread_key_t CreateKey() {\n    pthread_key_t key;\n    // When a thread exits, DeleteThreadLocalValue() will be called on\n    // the object managed for that thread.\n    GTEST_CHECK_POSIX_SUCCESS_(\n        pthread_key_create(&key, &DeleteThreadLocalValue));\n    return key;\n  }\n\n  T* GetOrCreateValue() const {\n    ThreadLocalValueHolderBase* const holder =\n        
static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));\n    if (holder != NULL) {\n      return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();\n    }\n\n    ValueHolder* const new_holder = default_factory_->MakeNewHolder();\n    ThreadLocalValueHolderBase* const holder_base = new_holder;\n    GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));\n    return new_holder->pointer();\n  }\n\n  class ValueHolderFactory {\n   public:\n    ValueHolderFactory() {}\n    virtual ~ValueHolderFactory() {}\n    virtual ValueHolder* MakeNewHolder() const = 0;\n\n   private:\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);\n  };\n\n  class DefaultValueHolderFactory : public ValueHolderFactory {\n   public:\n    DefaultValueHolderFactory() {}\n    virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }\n\n   private:\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);\n  };\n\n  class InstanceValueHolderFactory : public ValueHolderFactory {\n   public:\n    explicit InstanceValueHolderFactory(const T& value) : value_(value) {}\n    virtual ValueHolder* MakeNewHolder() const {\n      return new ValueHolder(value_);\n    }\n\n   private:\n    const T value_;  // The value for each thread.\n\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);\n  };\n\n  // A key pthreads uses for looking up per-thread values.\n  const pthread_key_t key_;\n  scoped_ptr<ValueHolderFactory> default_factory_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);\n};\n\n# endif  // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_\n\n#else  // GTEST_IS_THREADSAFE\n\n// A dummy implementation of synchronization primitives (mutex, lock,\n// and thread-local variable).  
Necessary for compiling Google Test where\n// mutex is not supported - using Google Test in multiple threads is not\n// supported on such platforms.\n\nclass Mutex {\n public:\n  Mutex() {}\n  void Lock() {}\n  void Unlock() {}\n  void AssertHeld() const {}\n};\n\n# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \\\n  extern ::testing::internal::Mutex mutex\n\n# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex\n\n// We cannot name this class MutexLock because the ctor declaration would\n// conflict with a macro named MutexLock, which is defined on some\n// platforms. That macro is used as a defensive measure to prevent against\n// inadvertent misuses of MutexLock like \"MutexLock(&mu)\" rather than\n// \"MutexLock l(&mu)\".  Hence the typedef trick below.\nclass GTestMutexLock {\n public:\n  explicit GTestMutexLock(Mutex*) {}  // NOLINT\n};\n\ntypedef GTestMutexLock MutexLock;\n\ntemplate <typename T>\nclass ThreadLocal {\n public:\n  ThreadLocal() : value_() {}\n  explicit ThreadLocal(const T& value) : value_(value) {}\n  T* pointer() { return &value_; }\n  const T* pointer() const { return &value_; }\n  const T& get() const { return value_; }\n  void set(const T& value) { value_ = value; }\n private:\n  T value_;\n};\n\n#endif  // GTEST_IS_THREADSAFE\n\n// Returns the number of threads running in the process, or 0 to indicate that\n// we cannot detect it.\nGTEST_API_ size_t GetThreadCount();\n\n// Passing non-POD classes through ellipsis (...) crashes the ARM\n// compiler and generates a warning in Sun Studio.  The Nokia Symbian\n// and the IBM XL C/C++ compiler try to instantiate a copy constructor\n// for objects passed through ellipsis (...), failing for uncopyable\n// objects.  
We define this to ensure that only POD is passed through\n// ellipsis on these systems.\n#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)\n// We lose support for NULL detection where the compiler doesn't like\n// passing non-POD classes through ellipsis (...).\n# define GTEST_ELLIPSIS_NEEDS_POD_ 1\n#else\n# define GTEST_CAN_COMPARE_NULL 1\n#endif\n\n// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between\n// const T& and const T* in a function template.  These compilers\n// _can_ decide between class template specializations for T and T*,\n// so a tr1::type_traits-like is_pointer works.\n#if defined(__SYMBIAN32__) || defined(__IBMCPP__)\n# define GTEST_NEEDS_IS_POINTER_ 1\n#endif\n\ntemplate <bool bool_value>\nstruct bool_constant {\n  typedef bool_constant<bool_value> type;\n  static const bool value = bool_value;\n};\ntemplate <bool bool_value> const bool bool_constant<bool_value>::value;\n\ntypedef bool_constant<false> false_type;\ntypedef bool_constant<true> true_type;\n\ntemplate <typename T>\nstruct is_pointer : public false_type {};\n\ntemplate <typename T>\nstruct is_pointer<T*> : public true_type {};\n\ntemplate <typename Iterator>\nstruct IteratorTraits {\n  typedef typename Iterator::value_type value_type;\n};\n\ntemplate <typename T>\nstruct IteratorTraits<T*> {\n  typedef T value_type;\n};\n\ntemplate <typename T>\nstruct IteratorTraits<const T*> {\n  typedef T value_type;\n};\n\n#if GTEST_OS_WINDOWS\n# define GTEST_PATH_SEP_ \"\\\\\"\n# define GTEST_HAS_ALT_PATH_SEP_ 1\n// The biggest signed integer type the compiler supports.\ntypedef __int64 BiggestInt;\n#else\n# define GTEST_PATH_SEP_ \"/\"\n# define GTEST_HAS_ALT_PATH_SEP_ 0\ntypedef long long BiggestInt;  // NOLINT\n#endif  // GTEST_OS_WINDOWS\n\n// Utilities for char.\n\n// isspace(int ch) and friends accept an unsigned char or EOF.  
char\n// may be signed, depending on the compiler (or compiler flags).\n// Therefore we need to cast a char to unsigned char before calling\n// isspace(), etc.\n\ninline bool IsAlpha(char ch) {\n  return isalpha(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsAlNum(char ch) {\n  return isalnum(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsDigit(char ch) {\n  return isdigit(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsLower(char ch) {\n  return islower(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsSpace(char ch) {\n  return isspace(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsUpper(char ch) {\n  return isupper(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsXDigit(char ch) {\n  return isxdigit(static_cast<unsigned char>(ch)) != 0;\n}\ninline bool IsXDigit(wchar_t ch) {\n  const unsigned char low_byte = static_cast<unsigned char>(ch);\n  return ch == low_byte && isxdigit(low_byte) != 0;\n}\n\ninline char ToLower(char ch) {\n  return static_cast<char>(tolower(static_cast<unsigned char>(ch)));\n}\ninline char ToUpper(char ch) {\n  return static_cast<char>(toupper(static_cast<unsigned char>(ch)));\n}\n\ninline std::string StripTrailingSpaces(std::string str) {\n  std::string::iterator it = str.end();\n  while (it != str.begin() && IsSpace(*--it))\n    it = str.erase(it);\n  return str;\n}\n\n// The testing::internal::posix namespace holds wrappers for common\n// POSIX functions.  These wrappers hide the differences between\n// Windows/MSVC and POSIX systems.  
Since some compilers define these\n// standard functions as macros, the wrapper cannot have the same name\n// as the wrapped function.\n\nnamespace posix {\n\n// Functions with a different name on Windows.\n\n#if GTEST_OS_WINDOWS\n\ntypedef struct _stat StatStruct;\n\n# ifdef __BORLANDC__\ninline int IsATTY(int fd) { return isatty(fd); }\ninline int StrCaseCmp(const char* s1, const char* s2) {\n  return stricmp(s1, s2);\n}\ninline char* StrDup(const char* src) { return strdup(src); }\n# else  // !__BORLANDC__\n#  if GTEST_OS_WINDOWS_MOBILE\ninline int IsATTY(int /* fd */) { return 0; }\n#  else\ninline int IsATTY(int fd) { return _isatty(fd); }\n#  endif  // GTEST_OS_WINDOWS_MOBILE\ninline int StrCaseCmp(const char* s1, const char* s2) {\n  return _stricmp(s1, s2);\n}\ninline char* StrDup(const char* src) { return _strdup(src); }\n# endif  // __BORLANDC__\n\n# if GTEST_OS_WINDOWS_MOBILE\ninline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }\n// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this\n// time and thus not defined there.\n# else\ninline int FileNo(FILE* file) { return _fileno(file); }\ninline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }\ninline int RmDir(const char* dir) { return _rmdir(dir); }\ninline bool IsDir(const StatStruct& st) {\n  return (_S_IFDIR & st.st_mode) != 0;\n}\n# endif  // GTEST_OS_WINDOWS_MOBILE\n\n#else\n\ntypedef struct stat StatStruct;\n\ninline int FileNo(FILE* file) { return fileno(file); }\ninline int IsATTY(int fd) { return isatty(fd); }\ninline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }\ninline int StrCaseCmp(const char* s1, const char* s2) {\n  return strcasecmp(s1, s2);\n}\ninline char* StrDup(const char* src) { return strdup(src); }\ninline int RmDir(const char* dir) { return rmdir(dir); }\ninline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }\n\n#endif  // GTEST_OS_WINDOWS\n\n// Functions deprecated by MSVC 
8.0.\n\nGTEST_DISABLE_MSC_WARNINGS_PUSH_(4996 /* deprecated function */)\n\ninline const char* StrNCpy(char* dest, const char* src, size_t n) {\n  return strncpy(dest, src, n);\n}\n\n// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and\n// StrError() aren't needed on Windows CE at this time and thus not\n// defined there.\n\n#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT\ninline int ChDir(const char* dir) { return chdir(dir); }\n#endif\ninline FILE* FOpen(const char* path, const char* mode) {\n  return fopen(path, mode);\n}\n#if !GTEST_OS_WINDOWS_MOBILE\ninline FILE *FReopen(const char* path, const char* mode, FILE* stream) {\n  return freopen(path, mode, stream);\n}\ninline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }\n#endif\ninline int FClose(FILE* fp) { return fclose(fp); }\n#if !GTEST_OS_WINDOWS_MOBILE\ninline int Read(int fd, void* buf, unsigned int count) {\n  return static_cast<int>(read(fd, buf, count));\n}\ninline int Write(int fd, const void* buf, unsigned int count) {\n  return static_cast<int>(write(fd, buf, count));\n}\ninline int Close(int fd) { return close(fd); }\ninline const char* StrError(int errnum) { return strerror(errnum); }\n#endif\ninline const char* GetEnv(const char* name) {\n#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT\n  // We are on Windows CE, which has no environment variables.\n  static_cast<void>(name);  // To prevent 'unused argument' warning.\n  return NULL;\n#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)\n  // Environment variables which we programmatically clear will be set to the\n  // empty string rather than unset (NULL).  Handle that case.\n  const char* const env = getenv(name);\n  return (env != NULL && env[0] != '\\0') ? env : NULL;\n#else\n  return getenv(name);\n#endif\n}\n\nGTEST_DISABLE_MSC_WARNINGS_POP_()\n\n#if GTEST_OS_WINDOWS_MOBILE\n// Windows CE has no C library. 
The abort() function is used in\n// several places in Google Test. This implementation provides a reasonable\n// imitation of standard behaviour.\nvoid Abort();\n#else\ninline void Abort() { abort(); }\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n}  // namespace posix\n\n// MSVC \"deprecates\" snprintf and issues warnings wherever it is used.  In\n// order to avoid these warnings, we need to use _snprintf or _snprintf_s on\n// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate\n// function in order to achieve that.  We use macro definition here because\n// snprintf is a variadic function.\n#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE\n// MSVC 2005 and above support variadic macros.\n# define GTEST_SNPRINTF_(buffer, size, format, ...) \\\n     _snprintf_s(buffer, size, size, format, __VA_ARGS__)\n#elif defined(_MSC_VER)\n// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't\n// complain about _snprintf.\n# define GTEST_SNPRINTF_ _snprintf\n#else\n# define GTEST_SNPRINTF_ snprintf\n#endif\n\n// The maximum number a BiggestInt can represent.  This definition\n// works no matter BiggestInt is represented in one's complement or\n// two's complement.\n//\n// We cannot rely on numeric_limits in STL, as __int64 and long long\n// are not part of standard C++ and numeric_limits doesn't need to be\n// defined for them.\nconst BiggestInt kMaxBiggestInt =\n    ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));\n\n// This template class serves as a compile-time function from size to\n// type.  It maps a size in bytes to a primitive type with that\n// size. 
e.g.\n//\n//   TypeWithSize<4>::UInt\n//\n// is typedef-ed to be unsigned int (unsigned integer made up of 4\n// bytes).\n//\n// Such functionality should belong to STL, but I cannot find it\n// there.\n//\n// Google Test uses this class in the implementation of floating-point\n// comparison.\n//\n// For now it only handles UInt (unsigned int) as that's all Google Test\n// needs.  Other types can be easily added in the future if need\n// arises.\ntemplate <size_t size>\nclass TypeWithSize {\n public:\n  // This prevents the user from using TypeWithSize<N> with incorrect\n  // values of N.\n  typedef void UInt;\n};\n\n// The specialization for size 4.\ntemplate <>\nclass TypeWithSize<4> {\n public:\n  // unsigned int has size 4 in both gcc and MSVC.\n  //\n  // As base/basictypes.h doesn't compile on Windows, we cannot use\n  // uint32, uint64, and etc here.\n  typedef int Int;\n  typedef unsigned int UInt;\n};\n\n// The specialization for size 8.\ntemplate <>\nclass TypeWithSize<8> {\n public:\n#if GTEST_OS_WINDOWS\n  typedef __int64 Int;\n  typedef unsigned __int64 UInt;\n#else\n  typedef long long Int;  // NOLINT\n  typedef unsigned long long UInt;  // NOLINT\n#endif  // GTEST_OS_WINDOWS\n};\n\n// Integer types of known sizes.\ntypedef TypeWithSize<4>::Int Int32;\ntypedef TypeWithSize<4>::UInt UInt32;\ntypedef TypeWithSize<8>::Int Int64;\ntypedef TypeWithSize<8>::UInt UInt64;\ntypedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.\n\n// Utilities for command line flags and environment variables.\n\n// Macro for referencing flags.\n#if !defined(GTEST_FLAG)\n# define GTEST_FLAG(name) FLAGS_gtest_##name\n#endif  // !defined(GTEST_FLAG)\n\n#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)\n# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1\n#endif  // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)\n\n#if !defined(GTEST_DECLARE_bool_)\n# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver\n\n// Macros for declaring flags.\n# define GTEST_DECLARE_bool_(name) 
GTEST_API_ extern bool GTEST_FLAG(name)\n# define GTEST_DECLARE_int32_(name) \\\n    GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)\n#define GTEST_DECLARE_string_(name) \\\n    GTEST_API_ extern ::std::string GTEST_FLAG(name)\n\n// Macros for defining flags.\n#define GTEST_DEFINE_bool_(name, default_val, doc) \\\n    GTEST_API_ bool GTEST_FLAG(name) = (default_val)\n#define GTEST_DEFINE_int32_(name, default_val, doc) \\\n    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)\n#define GTEST_DEFINE_string_(name, default_val, doc) \\\n    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)\n\n#endif  // !defined(GTEST_DECLARE_bool_)\n\n// Thread annotations\n#if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)\n# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)\n# define GTEST_LOCK_EXCLUDED_(locks)\n#endif  // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)\n\n// Parses 'str' for a 32-bit signed integer.  If successful, writes the result\n// to *value and returns true; otherwise leaves *value unchanged and returns\n// false.\n// TODO(chandlerc): Find a better way to refactor flag and environment parsing\n// out of both gtest-port.cc and gtest.cc to avoid exporting this utility\n// function.\nbool ParseInt32(const Message& src_text, const char* str, Int32* value);\n\n// Parses a bool/Int32/string from the environment variable\n// corresponding to the given Google Test flag.\nbool BoolFromGTestEnv(const char* flag, bool default_val);\nGTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);\nconst char* StringFromGTestEnv(const char* flag, const char* default_val);\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_\n\n\n#if GTEST_OS_LINUX\n# include <stdlib.h>\n# include <sys/types.h>\n# include <sys/wait.h>\n# include <unistd.h>\n#endif  // GTEST_OS_LINUX\n\n#if GTEST_HAS_EXCEPTIONS\n# include <stdexcept>\n#endif\n\n#include <ctype.h>\n#include <float.h>\n#include 
<string.h>\n#include <iomanip>\n#include <limits>\n#include <map>\n#include <set>\n#include <string>\n#include <vector>\n\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file defines the Message class.\n//\n// IMPORTANT NOTE: Due to limitation of the C++ language, we have to\n// leave some internal implementation details in this header file.\n// They are clearly marked by comments like this:\n//\n//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\n//\n// Such code is NOT meant to be used by a user directly, and is subject\n// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user\n// program!\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_\n#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_\n\n#include <limits>\n\n\n// Ensures that there is at least one operator<< in the global namespace.\n// See Message& operator<<(...) below for why.\nvoid operator<<(const testing::internal::Secret&, int);\n\nnamespace testing {\n\n// The Message class works like an ostream repeater.\n//\n// Typical usage:\n//\n//   1. You stream a bunch of values to a Message object.\n//      It will remember the text in a stringstream.\n//   2. Then you stream the Message object to an ostream.\n//      This causes the text in the Message to be streamed\n//      to the ostream.\n//\n// For example;\n//\n//   testing::Message foo;\n//   foo << 1 << \" != \" << 2;\n//   std::cout << foo;\n//\n// will print \"1 != 2\".\n//\n// Message is not intended to be inherited from.  
In particular, its\n// destructor is not virtual.\n//\n// Note that stringstream behaves differently in gcc and in MSVC.  You\n// can stream a NULL char pointer to it in the former, but not in the\n// latter (it causes an access violation if you do).  The Message\n// class hides this difference by treating a NULL char pointer as\n// \"(null)\".\nclass GTEST_API_ Message {\n private:\n  // The type of basic IO manipulators (endl, ends, and flush) for\n  // narrow streams.\n  typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);\n\n public:\n  // Constructs an empty Message.\n  Message();\n\n  // Copy constructor.\n  Message(const Message& msg) : ss_(new ::std::stringstream) {  // NOLINT\n    *ss_ << msg.GetString();\n  }\n\n  // Constructs a Message from a C-string.\n  explicit Message(const char* str) : ss_(new ::std::stringstream) {\n    *ss_ << str;\n  }\n\n#if GTEST_OS_SYMBIAN\n  // Streams a value (either a pointer or not) to this object.\n  template <typename T>\n  inline Message& operator <<(const T& value) {\n    StreamHelper(typename internal::is_pointer<T>::type(), value);\n    return *this;\n  }\n#else\n  // Streams a non-pointer value to this object.\n  template <typename T>\n  inline Message& operator <<(const T& val) {\n    // Some libraries overload << for STL containers.  These\n    // overloads are defined in the global namespace instead of ::std.\n    //\n    // C++'s symbol lookup rule (i.e. Koenig lookup) says that these\n    // overloads are visible in either the std namespace or the global\n    // namespace, but not other namespaces, including the testing\n    // namespace which Google Test's Message class is in.\n    //\n    // To allow STL containers (and other types that has a << operator\n    // defined in the global namespace) to be used in Google Test\n    // assertions, testing::Message must access the custom << operator\n    // from the global namespace.  
With this using declaration,\n    // overloads of << defined in the global namespace and those\n    // visible via Koenig lookup are both exposed in this function.\n    using ::operator <<;\n    *ss_ << val;\n    return *this;\n  }\n\n  // Streams a pointer value to this object.\n  //\n  // This function is an overload of the previous one.  When you\n  // stream a pointer to a Message, this definition will be used as it\n  // is more specialized.  (The C++ Standard, section\n  // [temp.func.order].)  If you stream a non-pointer, then the\n  // previous definition will be used.\n  //\n  // The reason for this overload is that streaming a NULL pointer to\n  // ostream is undefined behavior.  Depending on the compiler, you\n  // may get \"0\", \"(nil)\", \"(null)\", or an access violation.  To\n  // ensure consistent result across compilers, we always treat NULL\n  // as \"(null)\".\n  template <typename T>\n  inline Message& operator <<(T* const& pointer) {  // NOLINT\n    if (pointer == NULL) {\n      *ss_ << \"(null)\";\n    } else {\n      *ss_ << pointer;\n    }\n    return *this;\n  }\n#endif  // GTEST_OS_SYMBIAN\n\n  // Since the basic IO manipulators are overloaded for both narrow\n  // and wide streams, we have to provide this specialized definition\n  // of operator <<, even though its body is the same as the\n  // templatized version above.  Without this definition, streaming\n  // endl or other basic IO manipulators to Message will confuse the\n  // compiler.\n  Message& operator <<(BasicNarrowIoManip val) {\n    *ss_ << val;\n    return *this;\n  }\n\n  // Instead of 1/0, we want to see true/false for bool values.\n  Message& operator <<(bool b) {\n    return *this << (b ? 
\"true\" : \"false\");\n  }\n\n  // These two overloads allow streaming a wide C string to a Message\n  // using the UTF-8 encoding.\n  Message& operator <<(const wchar_t* wide_c_str);\n  Message& operator <<(wchar_t* wide_c_str);\n\n#if GTEST_HAS_STD_WSTRING\n  // Converts the given wide string to a narrow string using the UTF-8\n  // encoding, and streams the result to this Message object.\n  Message& operator <<(const ::std::wstring& wstr);\n#endif  // GTEST_HAS_STD_WSTRING\n\n#if GTEST_HAS_GLOBAL_WSTRING\n  // Converts the given wide string to a narrow string using the UTF-8\n  // encoding, and streams the result to this Message object.\n  Message& operator <<(const ::wstring& wstr);\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n  // Gets the text streamed to this object so far as an std::string.\n  // Each '\\0' character in the buffer is replaced with \"\\\\0\".\n  //\n  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\n  std::string GetString() const;\n\n private:\n\n#if GTEST_OS_SYMBIAN\n  // These are needed as the Nokia Symbian Compiler cannot decide between\n  // const T& and const T* in a function template. 
The Nokia compiler _can_\n  // decide between class template specializations for T and T*, so a\n  // tr1::type_traits-like is_pointer works, and we can overload on that.\n  template <typename T>\n  inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {\n    if (pointer == NULL) {\n      *ss_ << \"(null)\";\n    } else {\n      *ss_ << pointer;\n    }\n  }\n  template <typename T>\n  inline void StreamHelper(internal::false_type /*is_pointer*/,\n                           const T& value) {\n    // See the comments in Message& operator <<(const T&) above for why\n    // we need this using statement.\n    using ::operator <<;\n    *ss_ << value;\n  }\n#endif  // GTEST_OS_SYMBIAN\n\n  // We'll hold the text streamed to this object here.\n  const internal::scoped_ptr< ::std::stringstream> ss_;\n\n  // We declare (but don't implement) this to prevent the compiler\n  // from implementing the assignment operator.\n  void operator=(const Message&);\n};\n\n// Streams a Message to an ostream.\ninline std::ostream& operator <<(std::ostream& os, const Message& sb) {\n  return os << sb.GetString();\n}\n\nnamespace internal {\n\n// Converts a streamable value to an std::string.  A NULL pointer is\n// converted to \"(null)\".  
When the input value is a ::string,\n// ::std::string, ::wstring, or ::std::wstring object, each NUL\n// character in it is replaced with \"\\\\0\".\ntemplate <typename T>\nstd::string StreamableToString(const T& streamable) {\n  return (Message() << streamable).GetString();\n}\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file declares the String class and functions used internally by\n// Google Test.  They are subject to change without notice. They should not used\n// by code external to Google Test.\n//\n// This header file is #included by <gtest/internal/gtest-internal.h>.\n// It should not be #included by other files.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_\n\n#ifdef __BORLANDC__\n// string.h is not guaranteed to provide strcpy on C++ Builder.\n# include <mem.h>\n#endif\n\n#include <string.h>\n#include <string>\n\n\nnamespace testing {\nnamespace internal {\n\n// String - an abstract class holding static string utilities.\nclass GTEST_API_ String {\n public:\n  // Static utility methods\n\n  // Clones a 0-terminated C string, allocating memory using new.  The\n  // caller is responsible for deleting the return value using\n  // delete[].  Returns the cloned string, or NULL if the input is\n  // NULL.\n  //\n  // This is different from strdup() in string.h, which allocates\n  // memory using malloc().\n  static const char* CloneCString(const char* c_str);\n\n#if GTEST_OS_WINDOWS_MOBILE\n  // Windows CE does not have the 'ANSI' versions of Win32 APIs. 
To be\n  // able to pass strings to Win32 APIs on CE we need to convert them\n  // to 'Unicode', UTF-16.\n\n  // Creates a UTF-16 wide string from the given ANSI string, allocating\n  // memory using new. The caller is responsible for deleting the return\n  // value using delete[]. Returns the wide string, or NULL if the\n  // input is NULL.\n  //\n  // The wide string is created using the ANSI codepage (CP_ACP) to\n  // match the behaviour of the ANSI versions of Win32 calls and the\n  // C runtime.\n  static LPCWSTR AnsiToUtf16(const char* c_str);\n\n  // Creates an ANSI string from the given wide string, allocating\n  // memory using new. The caller is responsible for deleting the return\n  // value using delete[]. Returns the ANSI string, or NULL if the\n  // input is NULL.\n  //\n  // The returned string is created using the ANSI codepage (CP_ACP) to\n  // match the behaviour of the ANSI versions of Win32 calls and the\n  // C runtime.\n  static const char* Utf16ToAnsi(LPCWSTR utf16_str);\n#endif\n\n  // Compares two C strings.  Returns true iff they have the same content.\n  //\n  // Unlike strcmp(), this function can handle NULL argument(s).  A\n  // NULL C string is considered different to any non-NULL C string,\n  // including the empty string.\n  static bool CStringEquals(const char* lhs, const char* rhs);\n\n  // Converts a wide C string to a String using the UTF-8 encoding.\n  // NULL will be converted to \"(null)\".  If an error occurred during\n  // the conversion, \"(failed to convert from wide string)\" is\n  // returned.\n  static std::string ShowWideCString(const wchar_t* wide_c_str);\n\n  // Compares two wide C strings.  Returns true iff they have the same\n  // content.\n  //\n  // Unlike wcscmp(), this function can handle NULL argument(s).  
A\n  // NULL C string is considered different to any non-NULL C string,\n  // including the empty string.\n  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);\n\n  // Compares two C strings, ignoring case.  Returns true iff they\n  // have the same content.\n  //\n  // Unlike strcasecmp(), this function can handle NULL argument(s).\n  // A NULL C string is considered different to any non-NULL C string,\n  // including the empty string.\n  static bool CaseInsensitiveCStringEquals(const char* lhs,\n                                           const char* rhs);\n\n  // Compares two wide C strings, ignoring case.  Returns true iff they\n  // have the same content.\n  //\n  // Unlike wcscasecmp(), this function can handle NULL argument(s).\n  // A NULL C string is considered different to any non-NULL wide C string,\n  // including the empty string.\n  // NB: The implementations on different platforms slightly differ.\n  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE\n  // environment variable. On GNU platform this method uses wcscasecmp\n  // which compares according to LC_CTYPE category of the current locale.\n  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the\n  // current locale.\n  static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,\n                                               const wchar_t* rhs);\n\n  // Returns true iff the given string ends with the given suffix, ignoring\n  // case. 
Any string is considered to end with an empty suffix.\n  static bool EndsWithCaseInsensitive(\n      const std::string& str, const std::string& suffix);\n\n  // Formats an int value as \"%02d\".\n  static std::string FormatIntWidth2(int value);  // \"%02d\" for width == 2\n\n  // Formats an int value as \"%X\".\n  static std::string FormatHexInt(int value);\n\n  // Formats a byte as \"%02X\".\n  static std::string FormatByte(unsigned char value);\n\n private:\n  String();  // Not meant to be instantiated.\n};  // class String\n\n// Gets the content of the stringstream's buffer as an std::string.  Each '\\0'\n// character in the buffer is replaced with \"\\\\0\".\nGTEST_API_ std::string StringStreamToString(::std::stringstream* stream);\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: keith.ray@gmail.com (Keith Ray)\n//\n// Google Test filepath utilities\n//\n// This header file declares classes and functions used internally by\n// Google Test.  They are subject to change without notice.\n//\n// This file is #included in <gtest/internal/gtest-internal.h>.\n// Do not include this header file separately!\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_\n\n\nnamespace testing {\nnamespace internal {\n\n// FilePath - a class for file and directory pathname manipulation which\n// handles platform-specific conventions (like the pathname separator).\n// Used for helper functions for naming files in a directory for xml output.\n// Except for Set methods, all methods are const or static, which provides an\n// \"immutable value object\" -- useful for peace of mind.\n// A FilePath with a value ending in a path separator (\"like/this/\") represents\n// a directory, otherwise it is assumed to represent a file. 
In either case,\n// it may or may not represent an actual file or directory in the file system.\n// Names are NOT checked for syntax correctness -- no checking for illegal\n// characters, malformed paths, etc.\n\nclass GTEST_API_ FilePath {\n public:\n  FilePath() : pathname_(\"\") { }\n  FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }\n\n  explicit FilePath(const std::string& pathname) : pathname_(pathname) {\n    Normalize();\n  }\n\n  FilePath& operator=(const FilePath& rhs) {\n    Set(rhs);\n    return *this;\n  }\n\n  void Set(const FilePath& rhs) {\n    pathname_ = rhs.pathname_;\n  }\n\n  const std::string& string() const { return pathname_; }\n  const char* c_str() const { return pathname_.c_str(); }\n\n  // Returns the current working directory, or \"\" if unsuccessful.\n  static FilePath GetCurrentDir();\n\n  // Given directory = \"dir\", base_name = \"test\", number = 0,\n  // extension = \"xml\", returns \"dir/test.xml\". If number is greater\n  // than zero (e.g., 12), returns \"dir/test_12.xml\".\n  // On Windows platform, uses \\ as the separator rather than /.\n  static FilePath MakeFileName(const FilePath& directory,\n                               const FilePath& base_name,\n                               int number,\n                               const char* extension);\n\n  // Given directory = \"dir\", relative_path = \"test.xml\",\n  // returns \"dir/test.xml\".\n  // On Windows, uses \\ as the separator rather than /.\n  static FilePath ConcatPaths(const FilePath& directory,\n                              const FilePath& relative_path);\n\n  // Returns a pathname for a file that does not currently exist. The pathname\n  // will be directory/base_name.extension or\n  // directory/base_name_<number>.extension if directory/base_name.extension\n  // already exists. 
The number will be incremented until a pathname is found\n  // that does not already exist.\n  // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.\n  // There could be a race condition if two or more processes are calling this\n  // function at the same time -- they could both pick the same filename.\n  static FilePath GenerateUniqueFileName(const FilePath& directory,\n                                         const FilePath& base_name,\n                                         const char* extension);\n\n  // Returns true iff the path is \"\".\n  bool IsEmpty() const { return pathname_.empty(); }\n\n  // If input name has a trailing separator character, removes it and returns\n  // the name, otherwise return the name string unmodified.\n  // On Windows platform, uses \\ as the separator, other platforms use /.\n  FilePath RemoveTrailingPathSeparator() const;\n\n  // Returns a copy of the FilePath with the directory part removed.\n  // Example: FilePath(\"path/to/file\").RemoveDirectoryName() returns\n  // FilePath(\"file\"). If there is no directory part (\"just_a_file\"), it returns\n  // the FilePath unmodified. If there is no file part (\"just_a_dir/\") it\n  // returns an empty FilePath (\"\").\n  // On Windows platform, '\\' is the path separator, otherwise it is '/'.\n  FilePath RemoveDirectoryName() const;\n\n  // RemoveFileName returns the directory path with the filename removed.\n  // Example: FilePath(\"path/to/file\").RemoveFileName() returns \"path/to/\".\n  // If the FilePath is \"a_file\" or \"/a_file\", RemoveFileName returns\n  // FilePath(\"./\") or, on Windows, FilePath(\".\\\\\"). 
If the filepath does\n  // not have a file, like \"just/a/dir/\", it returns the FilePath unmodified.\n  // On Windows platform, '\\' is the path separator, otherwise it is '/'.\n  FilePath RemoveFileName() const;\n\n  // Returns a copy of the FilePath with the case-insensitive extension removed.\n  // Example: FilePath(\"dir/file.exe\").RemoveExtension(\"EXE\") returns\n  // FilePath(\"dir/file\"). If a case-insensitive extension is not\n  // found, returns a copy of the original FilePath.\n  FilePath RemoveExtension(const char* extension) const;\n\n  // Creates directories so that path exists. Returns true if successful or if\n  // the directories already exist; returns false if unable to create\n  // directories for any reason. Will also return false if the FilePath does\n  // not represent a directory (that is, it doesn't end with a path separator).\n  bool CreateDirectoriesRecursively() const;\n\n  // Create the directory so that path exists. Returns true if successful or\n  // if the directory already exists; returns false if unable to create the\n  // directory for any reason, including if the parent directory does not\n  // exist. Not named \"CreateDirectory\" because that's a macro on Windows.\n  bool CreateFolder() const;\n\n  // Returns true if FilePath describes something in the file-system,\n  // either a file, directory, or whatever, and that something exists.\n  bool FileOrDirectoryExists() const;\n\n  // Returns true if pathname describes a directory in the file-system\n  // that exists.\n  bool DirectoryExists() const;\n\n  // Returns true if FilePath ends with a path separator, which indicates that\n  // it is intended to represent a directory. Returns false otherwise.\n  // This does NOT check that a directory (or file) actually exists.\n  bool IsDirectory() const;\n\n  // Returns true if pathname describes a root directory. 
(Windows has one\n  // root directory per disk drive.)\n  bool IsRootDirectory() const;\n\n  // Returns true if pathname describes an absolute path.\n  bool IsAbsolutePath() const;\n\n private:\n  // Replaces multiple consecutive separators with a single separator.\n  // For example, \"bar///foo\" becomes \"bar/foo\". Does not eliminate other\n  // redundancies that might be in a pathname involving \".\" or \"..\".\n  //\n  // A pathname with multiple consecutive separators may occur either through\n  // user error or as a result of some scripts or APIs that generate a pathname\n  // with a trailing separator. On other platforms the same API or script\n  // may NOT generate a pathname with a trailing \"/\". Then elsewhere that\n  // pathname may have another \"/\" and pathname components added to it,\n  // without checking for the separator already being there.\n  // The script language and operating system may allow paths like \"foo//bar\"\n  // but some of the functions in FilePath will not handle that correctly. In\n  // particular, RemoveTrailingPathSeparator() only removes one separator, and\n  // it is called in CreateDirectoriesRecursively() assuming that it will change\n  // a pathname from directory syntax (trailing separator) to filename syntax.\n  //\n  // On Windows this method also replaces the alternate path separator '/' with\n  // the primary path separator '\\\\', so that for example \"bar\\\\/\\\\foo\" becomes\n  // \"bar\\\\foo\".\n\n  void Normalize();\n\n  // Returns a pointer to the last occurence of a valid path separator in\n  // the FilePath. On Windows, for example, both '/' and '\\' are valid path\n  // separators. 
Returns NULL if no path separator was found.\n  const char* FindLastPathSeparator() const;\n\n  std::string pathname_;\n};  // class FilePath\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_\n// This file was GENERATED by command:\n//     pump.py gtest-type-util.h.pump\n// DO NOT EDIT BY HAND!!!\n\n// Copyright 2008 Google Inc.\n// All Rights Reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n// Type utilities needed for implementing typed and type-parameterized\n// tests.  This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!\n//\n// Currently we support at most 50 types in a list, and at most 50\n// type-parameterized tests in one type-parameterized test case.\n// Please contact googletestframework@googlegroups.com if you need\n// more.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_\n\n\n// #ifdef __GNUC__ is too general here.  
It is possible to use gcc without using\n// libstdc++ (which is where cxxabi.h comes from).\n# if GTEST_HAS_CXXABI_H_\n#  include <cxxabi.h>\n# elif defined(__HP_aCC)\n#  include <acxx_demangle.h>\n# endif  // GTEST_HASH_CXXABI_H_\n\nnamespace testing {\nnamespace internal {\n\n// GetTypeName<T>() returns a human-readable name of type T.\n// NB: This function is also used in Google Mock, so don't move it inside of\n// the typed-test-only section below.\ntemplate <typename T>\nstd::string GetTypeName() {\n# if GTEST_HAS_RTTI\n\n  const char* const name = typeid(T).name();\n#  if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)\n  int status = 0;\n  // gcc's implementation of typeid(T).name() mangles the type name,\n  // so we have to demangle it.\n#   if GTEST_HAS_CXXABI_H_\n  using abi::__cxa_demangle;\n#   endif  // GTEST_HAS_CXXABI_H_\n  char* const readable_name = __cxa_demangle(name, 0, 0, &status);\n  const std::string name_str(status == 0 ? readable_name : name);\n  free(readable_name);\n  return name_str;\n#  else\n  return name;\n#  endif  // GTEST_HAS_CXXABI_H_ || __HP_aCC\n\n# else\n\n  return \"<type>\";\n\n# endif  // GTEST_HAS_RTTI\n}\n\n#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P\n\n// AssertyTypeEq<T1, T2>::type is defined iff T1 and T2 are the same\n// type.  This can be used as a compile-time assertion to ensure that\n// two types are equal.\n\ntemplate <typename T1, typename T2>\nstruct AssertTypeEq;\n\ntemplate <typename T>\nstruct AssertTypeEq<T, T> {\n  typedef bool type;\n};\n\n// A unique type used as the default value for the arguments of class\n// template Types.  This allows us to simulate variadic templates\n// (e.g. Types<int>, Type<int, double>, and etc), which C++ doesn't\n// support directly.\nstruct None {};\n\n// The following family of struct and struct templates are used to\n// represent type lists.  
In particular, TypesN<T1, T2, ..., TN>\n// represents a type list with N types (T1, T2, ..., and TN) in it.\n// Except for Types0, every struct in the family has two member types:\n// Head for the first type in the list, and Tail for the rest of the\n// list.\n\n// The empty type list.\nstruct Types0 {};\n\n// Type lists of length 1, 2, 3, and so on.\n\ntemplate <typename T1>\nstruct Types1 {\n  typedef T1 Head;\n  typedef Types0 Tail;\n};\ntemplate <typename T1, typename T2>\nstruct Types2 {\n  typedef T1 Head;\n  typedef Types1<T2> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3>\nstruct Types3 {\n  typedef T1 Head;\n  typedef Types2<T2, T3> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4>\nstruct Types4 {\n  typedef T1 Head;\n  typedef Types3<T2, T3, T4> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\nstruct Types5 {\n  typedef T1 Head;\n  typedef Types4<T2, T3, T4, T5> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6>\nstruct Types6 {\n  typedef T1 Head;\n  typedef Types5<T2, T3, T4, T5, T6> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7>\nstruct Types7 {\n  typedef T1 Head;\n  typedef Types6<T2, T3, T4, T5, T6, T7> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8>\nstruct Types8 {\n  typedef T1 Head;\n  typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9>\nstruct Types9 {\n  typedef T1 Head;\n  typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10>\nstruct Types10 {\n  typedef T1 Head;\n  typedef 
Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11>\nstruct Types11 {\n  typedef T1 Head;\n  typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12>\nstruct Types12 {\n  typedef T1 Head;\n  typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13>\nstruct Types13 {\n  typedef T1 Head;\n  typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14>\nstruct Types14 {\n  typedef T1 Head;\n  typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15>\nstruct Types15 {\n  typedef T1 Head;\n  typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16>\nstruct Types16 {\n  typedef T1 Head;\n  typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n 
     T16> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17>\nstruct Types17 {\n  typedef T1 Head;\n  typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18>\nstruct Types18 {\n  typedef T1 Head;\n  typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19>\nstruct Types19 {\n  typedef T1 Head;\n  typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20>\nstruct Types20 {\n  typedef T1 Head;\n  typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename 
T17, typename T18, typename T19, typename T20,\n    typename T21>\nstruct Types21 {\n  typedef T1 Head;\n  typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22>\nstruct Types22 {\n  typedef T1 Head;\n  typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23>\nstruct Types23 {\n  typedef T1 Head;\n  typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24>\nstruct Types24 {\n  typedef T1 Head;\n  typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, 
typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25>\nstruct Types25 {\n  typedef T1 Head;\n  typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26>\nstruct Types26 {\n  typedef T1 Head;\n  typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27>\nstruct Types27 {\n  typedef T1 Head;\n  typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28>\nstruct Types28 {\n  
typedef T1 Head;\n  typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29>\nstruct Types29 {\n  typedef T1 Head;\n  typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30>\nstruct Types30 {\n  typedef T1 Head;\n  typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31>\nstruct Types31 {\n  typedef T1 
Head;\n  typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32>\nstruct Types32 {\n  typedef T1 Head;\n  typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33>\nstruct Types33 {\n  typedef T1 Head;\n  typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename 
T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34>\nstruct Types34 {\n  typedef T1 Head;\n  typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35>\nstruct Types35 {\n  typedef T1 Head;\n  typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36>\nstruct Types36 {\n  typedef T1 Head;\n  typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename 
T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37>\nstruct Types37 {\n  typedef T1 Head;\n  typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38>\nstruct Types38 {\n  typedef T1 Head;\n  typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, 
typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39>\nstruct Types39 {\n  typedef T1 Head;\n  typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40>\nstruct Types40 {\n  typedef T1 Head;\n  typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41>\nstruct Types41 {\n  typedef T1 Head;\n  typedef Types40<T2, T3, T4, T5, T6, T7, T8, 
T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42>\nstruct Types42 {\n  typedef T1 Head;\n  typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43>\nstruct Types43 {\n  typedef T1 Head;\n  typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, 
T40, T41, T42,\n      T43> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44>\nstruct Types44 {\n  typedef T1 Head;\n  typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45>\nstruct Types45 {\n  typedef T1 Head;\n  typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45> Tail;\n};\n\ntemplate <typename T1, 
typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46>\nstruct Types46 {\n  typedef T1 Head;\n  typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45, T46> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47>\nstruct Types47 {\n  typedef T1 Head;\n  typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45, T46, T47> 
Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48>\nstruct Types48 {\n  typedef T1 Head;\n  typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45, T46, T47, T48> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49>\nstruct Types49 {\n  typedef T1 Head;\n  typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, 
T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45, T46, T47, T48, T49> Tail;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49, typename T50>\nstruct Types50 {\n  typedef T1 Head;\n  typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n      T44, T45, T46, T47, T48, T49, T50> Tail;\n};\n\n\n}  // namespace internal\n\n// We don't want to require the users to write TypesN<...> directly,\n// as that would require them to count the length.  Types<...> is much\n// easier to write, but generates horrible messages when there is a\n// compiler error, as gcc insists on printing out each template\n// argument, even if it has the default value (this means Types<int>\n// will appear as Types<int, None, None, ..., None> in the compiler\n// errors).\n//\n// Our solution is to combine the best part of the two approaches: a\n// user would write Types<T1, ..., TN>, and Google Test will translate\n// that to TypesN<T1, ..., TN> internally to make error messages\n// readable.  
The translation is done by the 'type' member of the\n// Types template.\ntemplate <typename T1 = internal::None, typename T2 = internal::None,\n    typename T3 = internal::None, typename T4 = internal::None,\n    typename T5 = internal::None, typename T6 = internal::None,\n    typename T7 = internal::None, typename T8 = internal::None,\n    typename T9 = internal::None, typename T10 = internal::None,\n    typename T11 = internal::None, typename T12 = internal::None,\n    typename T13 = internal::None, typename T14 = internal::None,\n    typename T15 = internal::None, typename T16 = internal::None,\n    typename T17 = internal::None, typename T18 = internal::None,\n    typename T19 = internal::None, typename T20 = internal::None,\n    typename T21 = internal::None, typename T22 = internal::None,\n    typename T23 = internal::None, typename T24 = internal::None,\n    typename T25 = internal::None, typename T26 = internal::None,\n    typename T27 = internal::None, typename T28 = internal::None,\n    typename T29 = internal::None, typename T30 = internal::None,\n    typename T31 = internal::None, typename T32 = internal::None,\n    typename T33 = internal::None, typename T34 = internal::None,\n    typename T35 = internal::None, typename T36 = internal::None,\n    typename T37 = internal::None, typename T38 = internal::None,\n    typename T39 = internal::None, typename T40 = internal::None,\n    typename T41 = internal::None, typename T42 = internal::None,\n    typename T43 = internal::None, typename T44 = internal::None,\n    typename T45 = internal::None, typename T46 = internal::None,\n    typename T47 = internal::None, typename T48 = internal::None,\n    typename T49 = internal::None, typename T50 = internal::None>\nstruct Types {\n  typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n     
 T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;\n};\n\ntemplate <>\nstruct Types<internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types0 type;\n};\ntemplate <typename T1>\nstruct Types<T1, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types1<T1> type;\n};\ntemplate <typename T1, typename T2>\nstruct Types<T1, T2, internal::None, 
internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types2<T1, T2> type;\n};\ntemplate <typename T1, typename T2, typename T3>\nstruct Types<T1, T2, T3, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types3<T1, T2, T3> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4>\nstruct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, 
internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types4<T1, T2, T3, T4> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\nstruct Types<T1, T2, T3, T4, T5, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types5<T1, T2, T3, T4, T5> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6>\nstruct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    
internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, 
internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, 
internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, 
internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, 
internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, 
internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    
T16, T17, T18, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename 
T17, typename T18, typename T19, typename T20>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, 
typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      
T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, 
internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,\n    internal::None, internal::None, 
internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, 
typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30> type;\n};\ntemplate <typename T1, typename T2, 
typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    
internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34>\nstruct 
Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename 
T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    
internal::None, internal::None, internal::None> {\n  typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename 
T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None> {\n  typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40> type;\n};\ntemplate <typename T1, typename T2, 
typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, 
T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None, internal::None> {\n  typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename 
T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    internal::None, internal::None, internal::None, internal::None,\n    internal::None, internal::None> {\n  typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,\n    internal::None, internal::None, internal::None, internal::None,\n    
internal::None> {\n  typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,\n    T46, internal::None, internal::None, internal::None, internal::None> {\n  typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45, T46> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename 
T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,\n    T46, T47, internal::None, internal::None, internal::None> {\n  typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45, T46, T47> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,\n    T46, T47, T48, internal::None, internal::None> {\n  typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, 
T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45, T46, T47, T48> type;\n};\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49>\nstruct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,\n    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,\n    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,\n    T46, T47, T48, T49, internal::None> {\n  typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45, T46, T47, T48, T49> type;\n};\n\nnamespace internal {\n\n# define GTEST_TEMPLATE_ template <typename T> class\n\n// The template \"selector\" struct TemplateSel<Tmpl> is used to\n// represent Tmpl, which must be a class template with one type\n// parameter, as a type.  TemplateSel<Tmpl>::Bind<T>::type is defined\n// as the type Tmpl<T>.  
This allows us to actually instantiate the\n// template \"selected\" by TemplateSel<Tmpl>.\n//\n// This trick is necessary for simulating typedef for class templates,\n// which C++ doesn't support directly.\ntemplate <GTEST_TEMPLATE_ Tmpl>\nstruct TemplateSel {\n  template <typename T>\n  struct Bind {\n    typedef Tmpl<T> type;\n  };\n};\n\n# define GTEST_BIND_(TmplSel, T) \\\n  TmplSel::template Bind<T>::type\n\n// A unique struct template used as the default value for the\n// arguments of class template Templates.  This allows us to simulate\n// variadic templates (e.g. Templates<int>, Templates<int, double>,\n// and etc), which C++ doesn't support directly.\ntemplate <typename T>\nstruct NoneT {};\n\n// The following family of struct and struct templates are used to\n// represent template lists.  In particular, TemplatesN<T1, T2, ...,\n// TN> represents a list of N templates (T1, T2, ..., and TN).  Except\n// for Templates0, every struct in the family has two member types:\n// Head for the selector of the first template in the list, and Tail\n// for the rest of the list.\n\n// The empty template list.\nstruct Templates0 {};\n\n// Template lists of length 1, 2, 3, and so on.\n\ntemplate <GTEST_TEMPLATE_ T1>\nstruct Templates1 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates0 Tail;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>\nstruct Templates2 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates1<T2> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>\nstruct Templates3 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates2<T2, T3> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4>\nstruct Templates4 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates3<T2, T3, T4> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>\nstruct Templates5 {\n  typedef TemplateSel<T1> Head;\n  
typedef Templates4<T2, T3, T4, T5> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>\nstruct Templates6 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates5<T2, T3, T4, T5, T6> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7>\nstruct Templates7 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>\nstruct Templates8 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>\nstruct Templates9 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10>\nstruct Templates10 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>\nstruct Templates11 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    
GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>\nstruct Templates12 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13>\nstruct Templates13 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>\nstruct Templates14 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>\nstruct Templates15 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ 
T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16>\nstruct Templates16 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>\nstruct Templates17 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>\nstruct Templates18 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19>\nstruct Templates19 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19> 
Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>\nstruct Templates20 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>\nstruct Templates21 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22>\nstruct Templates22 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22> 
Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>\nstruct Templates23 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>\nstruct Templates24 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, 
GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25>\nstruct Templates25 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>\nstruct Templates26 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>\nstruct Templates27 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ 
T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28>\nstruct Templates28 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>\nstruct Templates29 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    
GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>\nstruct Templates30 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31>\nstruct Templates31 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n 
   GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>\nstruct Templates32 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>\nstruct Templates33 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, 
GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34>\nstruct Templates34 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>\nstruct Templates35 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n   
 GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>\nstruct Templates36 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37>\nstruct Templates37 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    
GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>\nstruct Templates38 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>\nstruct Templates39 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, 
T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40>\nstruct Templates40 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ 
T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>\nstruct Templates41 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>\nstruct Templates42 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    
GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43>\nstruct Templates43 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, 
GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>\nstruct Templates44 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>\nstruct Templates45 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, 
GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46>\nstruct Templates46 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45, T46> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ 
T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>\nstruct Templates47 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45, T46, T47> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>\nstruct Templates48 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45, T46, T47, T48> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, 
GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,\n    GTEST_TEMPLATE_ T49>\nstruct Templates49 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45, T46, T47, T48, T49> Tail;\n};\n\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, 
GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,\n    GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>\nstruct Templates50 {\n  typedef TemplateSel<T1> Head;\n  typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n      T43, T44, T45, T46, T47, T48, T49, T50> Tail;\n};\n\n\n// We don't want to require the users to write TemplatesN<...> directly,\n// as that would require them to count the length.  Templates<...> is much\n// easier to write, but generates horrible messages when there is a\n// compiler error, as gcc insists on printing out each template\n// argument, even if it has the default value (this means Templates<list>\n// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler\n// errors).\n//\n// Our solution is to combine the best part of the two approaches: a\n// user would write Templates<T1, ..., TN>, and Google Test will translate\n// that to TemplatesN<T1, ..., TN> internally to make error messages\n// readable.  
The translation is done by the 'type' member of the\n// Templates template.\ntemplate <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,\n    GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,\n    GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,\n    GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,\n    GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,\n    GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,\n    GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,\n    GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,\n    GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,\n    GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,\n    GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,\n    GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,\n    GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,\n    GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,\n    GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,\n    GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,\n    GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,\n    GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,\n    GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,\n    GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,\n    GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,\n    GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,\n    GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,\n    GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,\n    GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>\nstruct Templates {\n  typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45, T46, T47, T48, T49, T50> type;\n};\n\ntemplate <>\nstruct 
Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT> {\n  typedef Templates0 type;\n};\ntemplate <GTEST_TEMPLATE_ T1>\nstruct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT> {\n  typedef Templates1<T1> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>\nstruct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT> {\n  typedef Templates2<T1, T2> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>\nstruct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates3<T1, T2, T3> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4>\nstruct Templates<T1, T2, T3, T4, NoneT, NoneT, 
NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates4<T1, T2, T3, T4> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>\nstruct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates5<T1, T2, T3, T4, T5> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>\nstruct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates6<T1, T2, T3, T4, T5, T6> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, 
NoneT, NoneT> {\n  typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;\n};\ntemplate 
<GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, 
NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, 
GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, 
NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, 
T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT> {\n  typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22> 
type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT> {\n  typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT> {\n  typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, 
T19, T20, T21, T22, T23, T24> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT> {\n  typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    
NoneT, NoneT> {\n  typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT> {\n  typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, 
T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT> {\n  typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT> {\n  typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n   
 GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31> type;\n};\ntemplate 
<GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>\nstruct Templates<T1, T2, T3, 
T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, 
GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, 
T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    
GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    
GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, 
T36, T37, T38, T39, T40> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    
GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ 
T43>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n    
  T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    T45, NoneT, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, 
GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    T45, T46, NoneT, NoneT, NoneT, NoneT> {\n  typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45, T46> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, 
GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    T45, T46, T47, NoneT, NoneT, NoneT> {\n  typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45, T46, T47> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, 
GTEST_TEMPLATE_ T48>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    T45, T46, T47, T48, NoneT, NoneT> {\n  typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45, T46, T47, T48> type;\n};\ntemplate <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,\n    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,\n    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,\n    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,\n    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,\n    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,\n    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,\n    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,\n    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,\n    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,\n    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,\n    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,\n    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,\n    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,\n    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,\n    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,\n    GTEST_TEMPLATE_ T49>\nstruct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,\n    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,\n    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,\n    T45, T46, T47, T48, T49, NoneT> {\n  typedef 
Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n      T42, T43, T44, T45, T46, T47, T48, T49> type;\n};\n\n// The TypeList template makes it possible to use either a single type\n// or a Types<...> list in TYPED_TEST_CASE() and\n// INSTANTIATE_TYPED_TEST_CASE_P().\n\ntemplate <typename T>\nstruct TypeList {\n  typedef Types1<T> type;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49, typename T50>\nstruct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46, T47, T48, T49, T50> > {\n  typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n      T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;\n};\n\n#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // 
GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_\n\n// Due to C++ preprocessor weirdness, we need double indirection to\n// concatenate two tokens when one of them is __LINE__.  Writing\n//\n//   foo ## __LINE__\n//\n// will result in the token foo__LINE__, instead of foo followed by\n// the current line number.  For more details, see\n// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6\n#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)\n#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar\n\nclass ProtocolMessage;\nnamespace proto2 { class Message; }\n\nnamespace testing {\n\n// Forward declarations.\n\nclass AssertionResult;                 // Result of an assertion.\nclass Message;                         // Represents a failure message.\nclass Test;                            // Represents a test.\nclass TestInfo;                        // Information about a test.\nclass TestPartResult;                  // Result of a test part.\nclass UnitTest;                        // A collection of test cases.\n\ntemplate <typename T>\n::std::string PrintToString(const T& value);\n\nnamespace internal {\n\nstruct TraceInfo;                      // Information about a trace point.\nclass ScopedTrace;                     // Implements scoped trace.\nclass TestInfoImpl;                    // Opaque implementation of TestInfo\nclass UnitTestImpl;                    // Opaque implementation of UnitTest\n\n// The text used in failure messages to indicate the start of the\n// stack trace.\nGTEST_API_ extern const char kStackTraceMarker[];\n\n// Two overloaded helpers for checking at compile time whether an\n// expression is a null pointer literal (i.e. NULL or any 0-valued\n// compile-time integral constant).  Their return values have\n// different sizes, so we can use sizeof() to test which version is\n// picked by the compiler.  
These helpers have no implementations, as\n// we only need their signatures.\n//\n// Given IsNullLiteralHelper(x), the compiler will pick the first\n// version if x can be implicitly converted to Secret*, and pick the\n// second version otherwise.  Since Secret is a secret and incomplete\n// type, the only expression a user can write that has type Secret* is\n// a null pointer literal.  Therefore, we know that x is a null\n// pointer literal if and only if the first version is picked by the\n// compiler.\nchar IsNullLiteralHelper(Secret* p);\nchar (&IsNullLiteralHelper(...))[2];  // NOLINT\n\n// A compile-time bool constant that is true if and only if x is a\n// null pointer literal (i.e. NULL or any 0-valued compile-time\n// integral constant).\n#ifdef GTEST_ELLIPSIS_NEEDS_POD_\n// We lose support for NULL detection where the compiler doesn't like\n// passing non-POD classes through ellipsis (...).\n# define GTEST_IS_NULL_LITERAL_(x) false\n#else\n# define GTEST_IS_NULL_LITERAL_(x) \\\n    (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)\n#endif  // GTEST_ELLIPSIS_NEEDS_POD_\n\n// Appends the user-supplied message to the Google-Test-generated message.\nGTEST_API_ std::string AppendUserMessage(\n    const std::string& gtest_msg, const Message& user_msg);\n\n#if GTEST_HAS_EXCEPTIONS\n\n// This exception is thrown by (and only by) a failed Google Test\n// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions\n// are enabled).  We derive it from std::runtime_error, which is for\n// errors presumably detectable only at run time.  
Since\n// std::runtime_error inherits from std::exception, many testing\n// frameworks know how to extract and print the message inside it.\nclass GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {\n public:\n  explicit GoogleTestFailureException(const TestPartResult& failure);\n};\n\n#endif  // GTEST_HAS_EXCEPTIONS\n\n// A helper class for creating scoped traces in user programs.\nclass GTEST_API_ ScopedTrace {\n public:\n  // The c'tor pushes the given source file location and message onto\n  // a trace stack maintained by Google Test.\n  ScopedTrace(const char* file, int line, const Message& message);\n\n  // The d'tor pops the info pushed by the c'tor.\n  //\n  // Note that the d'tor is not virtual in order to be efficient.\n  // Don't inherit from ScopedTrace!\n  ~ScopedTrace();\n\n private:\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);\n} GTEST_ATTRIBUTE_UNUSED_;  // A ScopedTrace object does its job in its\n                            // c'tor and d'tor.  Therefore it doesn't\n                            // need to be used otherwise.\n\nnamespace edit_distance {\n// Returns the optimal edits to go from 'left' to 'right'.\n// All edits cost the same, with replace having lower priority than\n// add/remove.\n// Simple implementation of the Wagner–Fischer algorithm.\n// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm\nenum EditType { kMatch, kAdd, kRemove, kReplace };\nGTEST_API_ std::vector<EditType> CalculateOptimalEdits(\n    const std::vector<size_t>& left, const std::vector<size_t>& right);\n\n// Same as above, but the input is represented as strings.\nGTEST_API_ std::vector<EditType> CalculateOptimalEdits(\n    const std::vector<std::string>& left,\n    const std::vector<std::string>& right);\n\n// Create a diff of the input strings in Unified diff format.\nGTEST_API_ std::string CreateUnifiedDiff(const std::vector<std::string>& left,\n                                         const std::vector<std::string>& right,\n          
                               size_t context = 2);\n\n}  // namespace edit_distance\n\n// Calculate the diff between 'left' and 'right' and return it in unified diff\n// format.\n// If not null, stores in 'total_line_count' the total number of lines found\n// in left + right.\nGTEST_API_ std::string DiffStrings(const std::string& left,\n                                   const std::string& right,\n                                   size_t* total_line_count);\n\n// Constructs and returns the message for an equality assertion\n// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.\n//\n// The first four parameters are the expressions used in the assertion\n// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)\n// where foo is 5 and bar is 6, we have:\n//\n//   expected_expression: \"foo\"\n//   actual_expression:   \"bar\"\n//   expected_value:      \"5\"\n//   actual_value:        \"6\"\n//\n// The ignoring_case parameter is true iff the assertion is a\n// *_STRCASEEQ*.  When it's true, the string \" (ignoring case)\" will\n// be inserted into the message.\nGTEST_API_ AssertionResult EqFailure(const char* expected_expression,\n                                     const char* actual_expression,\n                                     const std::string& expected_value,\n                                     const std::string& actual_value,\n                                     bool ignoring_case);\n\n// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.\nGTEST_API_ std::string GetBoolAssertionFailureMessage(\n    const AssertionResult& assertion_result,\n    const char* expression_text,\n    const char* actual_predicate_value,\n    const char* expected_predicate_value);\n\n// This template class represents an IEEE floating-point number\n// (either single-precision or double-precision, depending on the\n// template parameters).\n//\n// The purpose of this class is to do more sophisticated number\n// comparison.  
(Due to round-off error, etc, it's very unlikely that\n// two floating-points will be equal exactly.  Hence a naive\n// comparison by the == operation often doesn't work.)\n//\n// Format of IEEE floating-point:\n//\n//   The most-significant bit being the leftmost, an IEEE\n//   floating-point looks like\n//\n//     sign_bit exponent_bits fraction_bits\n//\n//   Here, sign_bit is a single bit that designates the sign of the\n//   number.\n//\n//   For float, there are 8 exponent bits and 23 fraction bits.\n//\n//   For double, there are 11 exponent bits and 52 fraction bits.\n//\n//   More details can be found at\n//   http://en.wikipedia.org/wiki/IEEE_floating-point_standard.\n//\n// Template parameter:\n//\n//   RawType: the raw floating-point type (either float or double)\ntemplate <typename RawType>\nclass FloatingPoint {\n public:\n  // Defines the unsigned integer type that has the same size as the\n  // floating point number.\n  typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;\n\n  // Constants.\n\n  // # of bits in a number.\n  static const size_t kBitCount = 8*sizeof(RawType);\n\n  // # of fraction bits in a number.\n  static const size_t kFractionBitCount =\n    std::numeric_limits<RawType>::digits - 1;\n\n  // # of exponent bits in a number.\n  static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;\n\n  // The mask for the sign bit.\n  static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);\n\n  // The mask for the fraction bits.\n  static const Bits kFractionBitMask =\n    ~static_cast<Bits>(0) >> (kExponentBitCount + 1);\n\n  // The mask for the exponent bits.\n  static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);\n\n  // How many ULP's (Units in the Last Place) we want to tolerate when\n  // comparing two numbers.  The larger the value, the more error we\n  // allow.  
A 0 value means that two numbers must be exactly the same\n  // to be considered equal.\n  //\n  // The maximum error of a single floating-point operation is 0.5\n  // units in the last place.  On Intel CPU's, all floating-point\n  // calculations are done with 80-bit precision, while double has 64\n  // bits.  Therefore, 4 should be enough for ordinary use.\n  //\n  // See the following article for more details on ULP:\n  // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/\n  static const size_t kMaxUlps = 4;\n\n  // Constructs a FloatingPoint from a raw floating-point number.\n  //\n  // On an Intel CPU, passing a non-normalized NAN (Not a Number)\n  // around may change its bits, although the new value is guaranteed\n  // to be also a NAN.  Therefore, don't expect this constructor to\n  // preserve the bits in x when x is a NAN.\n  explicit FloatingPoint(const RawType& x) { u_.value_ = x; }\n\n  // Static methods\n\n  // Reinterprets a bit pattern as a floating-point number.\n  //\n  // This function is needed to test the AlmostEquals() method.\n  static RawType ReinterpretBits(const Bits bits) {\n    FloatingPoint fp(0);\n    fp.u_.bits_ = bits;\n    return fp.u_.value_;\n  }\n\n  // Returns the floating-point number that represents positive infinity.\n  static RawType Infinity() {\n    return ReinterpretBits(kExponentBitMask);\n  }\n\n  // Returns the maximum representable finite floating-point number.\n  static RawType Max();\n\n  // Non-static methods\n\n  // Returns the bits that represent this number.\n  const Bits &bits() const { return u_.bits_; }\n\n  // Returns the exponent bits of this number.\n  Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }\n\n  // Returns the fraction bits of this number.\n  Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }\n\n  // Returns the sign bit of this number.\n  Bits sign_bit() const { return kSignBitMask & u_.bits_; }\n\n  // Returns true iff 
this is NAN (not a number).\n  bool is_nan() const {\n    // It's a NAN if the exponent bits are all ones and the fraction\n    // bits are not entirely zeros.\n    return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);\n  }\n\n  // Returns true iff this number is at most kMaxUlps ULP's away from\n  // rhs.  In particular, this function:\n  //\n  //   - returns false if either number is (or both are) NAN.\n  //   - treats really large numbers as almost equal to infinity.\n  //   - thinks +0.0 and -0.0 are 0 ULP's apart.\n  bool AlmostEquals(const FloatingPoint& rhs) const {\n    // The IEEE standard says that any comparison operation involving\n    // a NAN must return false.\n    if (is_nan() || rhs.is_nan()) return false;\n\n    return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)\n        <= kMaxUlps;\n  }\n\n private:\n  // The data type used to store the actual floating-point number.\n  union FloatingPointUnion {\n    RawType value_;  // The raw floating-point number.\n    Bits bits_;      // The bits that represent the number.\n  };\n\n  // Converts an integer from the sign-and-magnitude representation to\n  // the biased representation.  
More precisely, let N be 2 to the\n  // power of (kBitCount - 1), an integer x is represented by the\n  // unsigned number x + N.\n  //\n  // For instance,\n  //\n  //   -N + 1 (the most negative number representable using\n  //          sign-and-magnitude) is represented by 1;\n  //   0      is represented by N; and\n  //   N - 1  (the biggest number representable using\n  //          sign-and-magnitude) is represented by 2N - 1.\n  //\n  // Read http://en.wikipedia.org/wiki/Signed_number_representations\n  // for more details on signed number representations.\n  static Bits SignAndMagnitudeToBiased(const Bits &sam) {\n    if (kSignBitMask & sam) {\n      // sam represents a negative number.\n      return ~sam + 1;\n    } else {\n      // sam represents a positive number.\n      return kSignBitMask | sam;\n    }\n  }\n\n  // Given two numbers in the sign-and-magnitude representation,\n  // returns the distance between them as an unsigned number.\n  static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,\n                                                     const Bits &sam2) {\n    const Bits biased1 = SignAndMagnitudeToBiased(sam1);\n    const Bits biased2 = SignAndMagnitudeToBiased(sam2);\n    return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);\n  }\n\n  FloatingPointUnion u_;\n};\n\n// We cannot use std::numeric_limits<T>::max() as it clashes with the max()\n// macro defined by <windows.h>.\ntemplate <>\ninline float FloatingPoint<float>::Max() { return FLT_MAX; }\ntemplate <>\ninline double FloatingPoint<double>::Max() { return DBL_MAX; }\n\n// Typedefs the instances of the FloatingPoint template class that we\n// care to use.\ntypedef FloatingPoint<float> Float;\ntypedef FloatingPoint<double> Double;\n\n// In order to catch the mistake of putting tests that use different\n// test fixture classes in the same test case, we need to assign\n// unique IDs to fixture classes and compare them.  
The TypeId type is
// used to hold such IDs.  The user should treat TypeId as an opaque
// type: the only operation allowed on TypeId values is to compare
// them for equality using the == operator.
typedef const void* TypeId;

template <typename T>
class TypeIdHelper {
 public:
  // dummy_ must not have a const type.  Otherwise an overly eager
  // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
  // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
  static bool dummy_;
};

template <typename T>
bool TypeIdHelper<T>::dummy_ = false;

// GetTypeId<T>() returns the ID of type T.  Different values will be
// returned for different types.  Calling the function twice with the
// same type argument is guaranteed to return the same ID.
template <typename T>
TypeId GetTypeId() {
  // The compiler is required to allocate a different
  // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
  // the template.  Therefore, the address of dummy_ is guaranteed to
  // be unique.
  return &(TypeIdHelper<T>::dummy_);
}

// Returns the type ID of ::testing::Test.  Always call this instead
// of GetTypeId< ::testing::Test>() to get the type ID of
// ::testing::Test, as the latter may give the wrong result due to a
// suspected linker bug when compiling Google Test as a Mac OS X
// framework.
GTEST_API_ TypeId GetTestTypeId();

// Defines the abstract factory interface that creates instances
// of a Test object.
class TestFactoryBase {
 public:
  virtual ~TestFactoryBase() {}

  // Creates a test instance to run. The instance is both created and destroyed
  // within TestInfoImpl::Run().
  virtual Test* CreateTest() = 0;

 protected:
  TestFactoryBase() {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
};

// This class provides implementation of TestFactoryBase interface.
// It is used in TEST and TEST_F macros.
template <class TestClass>
class TestFactoryImpl : public TestFactoryBase {
 public:
  virtual Test* CreateTest() { return new TestClass; }
};

#if GTEST_OS_WINDOWS

// Predicate-formatters for implementing the HRESULT checking macros
// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
// We pass a long instead of HRESULT to avoid causing an
// include dependency for the HRESULT type.
GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
                                            long hr);  // NOLINT
GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
                                            long hr);  // NOLINT

#endif  // GTEST_OS_WINDOWS

// Types of SetUpTestCase() and TearDownTestCase() functions.
typedef void (*SetUpTestCaseFunc)();
typedef void (*TearDownTestCaseFunc)();

struct CodeLocation {
  CodeLocation(const string& a_file, int a_line) : file(a_file), line(a_line) {}

  string file;
  int line;
};

// Creates a new TestInfo object and registers it with Google Test;
// returns the created object.
//
// Arguments:
//
//   test_case_name:   name of the test case
//   name:             name of the test
//   type_param        the name of the test's type parameter, or NULL if
//                     this is not a typed or a type-parameterized test.
//   value_param       text representation of the test's value parameter,
//                     or NULL if this is not a value-parameterized test.
//   code_location:    code location where the test is defined
//   fixture_class_id: ID of the test fixture class
//   set_up_tc:        pointer to the function that sets up the test 
case
//   tear_down_tc:     pointer to the function that tears down the test case
//   factory:          pointer to the factory that creates a test object.
//                     The newly created TestInfo instance will assume
//                     ownership of the factory object.
GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
    const char* test_case_name,
    const char* name,
    const char* type_param,
    const char* value_param,
    CodeLocation code_location,
    TypeId fixture_class_id,
    SetUpTestCaseFunc set_up_tc,
    TearDownTestCaseFunc tear_down_tc,
    TestFactoryBase* factory);

// If *pstr starts with the given prefix, modifies *pstr to be right
// past the prefix and returns true; otherwise leaves *pstr unchanged
// and returns false.  None of pstr, *pstr, and prefix can be NULL.
GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);

#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P

// State of the definition of a type-parameterized test case.
class GTEST_API_ TypedTestCasePState {
 public:
  TypedTestCasePState() : registered_(false) {}

  // Adds the given test name to defined_test_names_ and returns true
  // if the test case hasn't been registered; otherwise aborts the
  // program.
  bool AddTestName(const char* file, int line, const char* case_name,
                   const char* test_name) {
    if (registered_) {
      fprintf(stderr, "%s Test %s must be defined before "
              "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
              FormatFileLocation(file, line).c_str(), test_name, case_name);
      fflush(stderr);
      posix::Abort();
    }
    registered_tests_.insert(
        ::std::make_pair(test_name, CodeLocation(file, line)));
    return true;
  }

  bool TestExists(const std::string& test_name) const {
    return registered_tests_.count(test_name) > 0;
  }

  const CodeLocation& GetCodeLocation(const std::string& test_name) const {
    RegisteredTestsMap::const_iterator it = registered_tests_.find(test_name);
    GTEST_CHECK_(it != registered_tests_.end());
    return it->second;
  }

  // Verifies that registered_tests match the test names in
  // defined_test_names_; returns registered_tests if successful, or
  // aborts the program otherwise.
  const char* VerifyRegisteredTestNames(
      const char* file, int line, const char* registered_tests);

 private:
  typedef ::std::map<std::string, CodeLocation> RegisteredTestsMap;

  bool registered_;
  RegisteredTestsMap registered_tests_;
};

// Skips to the first non-space char after the first comma in 'str';
// returns NULL if no comma is found in 'str'.
inline const char* SkipComma(const char* str) {
  const char* comma = strchr(str, ',');
  if (comma == NULL) {
    return NULL;
  }
  while (IsSpace(*(++comma))) {}
  return comma;
}

// Returns the prefix of 'str' before the first comma in it; returns
// the entire string if it contains no comma.
inline std::string GetPrefixUntilComma(const char* str) {
  const char* comma = strchr(str, ',');
  return comma == NULL ? str : std::string(str, comma);
}

// Splits a given string on a given delimiter, populating a given
// vector with the fields.
void SplitString(const ::std::string& str, char delimiter,
                 ::std::vector< ::std::string>* dest);

// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
// registers a list of type-parameterized tests with Google Test.  The
// return value is insignificant - we just need to return something
// such that we can call this function in a namespace scope.
//
// Implementation note: The GTEST_TEMPLATE_ macro declares a template
// template parameter.  
It's defined in gtest-type-util.h.
template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
class TypeParameterizedTest {
 public:
  // 'index' is the index of the test in the type list 'Types'
  // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
  // Types).  Valid values for 'index' are [0, N - 1] where N is the
  // length of Types.
  static bool Register(const char* prefix,
                       CodeLocation code_location,
                       const char* case_name, const char* test_names,
                       int index) {
    typedef typename Types::Head Type;
    typedef Fixture<Type> FixtureClass;
    typedef typename GTEST_BIND_(TestSel, Type) TestClass;

    // First, registers the first type-parameterized test in the type
    // list.
    MakeAndRegisterTestInfo(
        (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
         + StreamableToString(index)).c_str(),
        StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(),
        GetTypeName<Type>().c_str(),
        NULL,  // No value parameter.
        code_location,
        GetTypeId<FixtureClass>(),
        TestClass::SetUpTestCase,
        TestClass::TearDownTestCase,
        new TestFactoryImpl<TestClass>);

    // Next, recurses (at compile time) with the tail of the type list.
    return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
        ::Register(prefix, code_location, case_name, test_names, index + 1);
  }
};

// The base case for the compile time recursion.
template <GTEST_TEMPLATE_ Fixture, class TestSel>
class TypeParameterizedTest<Fixture, TestSel, Types0> {
 public:
  static bool Register(const char* /*prefix*/, CodeLocation,
                       const char* /*case_name*/, const char* /*test_names*/,
                       int /*index*/) {
    return true;
  }
};

// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
// registers *all combinations* of 'Tests' and 'Types' with Google
// Test.  The return value is insignificant - we just need to return
// something such that we can call this function in a namespace scope.
template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
class TypeParameterizedTestCase {
 public:
  static bool Register(const char* prefix, CodeLocation code_location,
                       const TypedTestCasePState* state,
                       const char* case_name, const char* test_names) {
    std::string test_name = StripTrailingSpaces(
        GetPrefixUntilComma(test_names));
    if (!state->TestExists(test_name)) {
      fprintf(stderr, "Failed to get code location for test %s.%s at %s.",
              case_name, test_name.c_str(),
              FormatFileLocation(code_location.file.c_str(),
                                 code_location.line).c_str());
      fflush(stderr);
      posix::Abort();
    }
    const CodeLocation& test_location = state->GetCodeLocation(test_name);

    typedef typename Tests::Head Head;

    // First, register the first test in 'Tests' for each type in 'Types'.
    TypeParameterizedTest<Fixture, Head, Types>::Register(
        prefix, test_location, case_name, test_names, 0);

    // Next, recurses (at compile time) with the tail of the test list.
    return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
        ::Register(prefix, code_location, state,
                   case_name, SkipComma(test_names));
  }
};

// The base case for the compile time recursion.
template <GTEST_TEMPLATE_ Fixture, typename Types>
class TypeParameterizedTestCase<Fixture, Templates0, Types> {
 public:
  static bool Register(const char* /*prefix*/, CodeLocation,
                       const TypedTestCasePState* /*state*/,
                       const char* /*case_name*/, const char* /*test_names*/) {
    return true;
  }
};

#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P

// 
Returns the current OS stack trace as an std::string.
//
// The maximum number of stack frames to be included is specified by
// the gtest_stack_trace_depth flag.  The skip_count parameter
// specifies the number of top frames to be skipped, which doesn't
// count against the number of frames to be included.
//
// For example, if Foo() calls Bar(), which in turn calls
// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
    UnitTest* unit_test, int skip_count);

// Helpers for suppressing warnings on unreachable code or constant
// condition.

// Always returns true.
GTEST_API_ bool AlwaysTrue();

// Always returns false.
inline bool AlwaysFalse() { return !AlwaysTrue(); }

// Helper for suppressing false warning from Clang on a const char*
// variable declared in a conditional expression always being NULL in
// the else branch.
struct GTEST_API_ ConstCharPtr {
  ConstCharPtr(const char* str) : value(str) {}
  operator bool() const { return true; }
  const char* value;
};

// A simple Linear Congruential Generator for generating random
// numbers with a uniform distribution.  Unlike rand() and srand(), it
// doesn't use global state (and therefore can't interfere with user
// code).  Unlike rand_r(), it's portable.  An LCG isn't very random,
// but it's good enough for our purposes.
class GTEST_API_ Random {
 public:
  static const UInt32 kMaxRange = 1u << 31;

  explicit Random(UInt32 seed) : state_(seed) {}

  void Reseed(UInt32 seed) { state_ = seed; }

  // Generates a random number from [0, range).  Crashes if 'range' is
  // 0 or greater than kMaxRange.
  UInt32 Generate(UInt32 range);

 private:
  UInt32 state_;
  GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
};

// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
// compiler error iff T1 and T2 are different types.
template <typename T1, typename T2>
struct CompileAssertTypesEqual;

template <typename T>
struct CompileAssertTypesEqual<T, T> {
};

// Removes the reference from a type if it is a reference type,
// otherwise leaves it unchanged.  This is the same as
// tr1::remove_reference, which is not widely available yet.
template <typename T>
struct RemoveReference { typedef T type; };  // NOLINT
template <typename T>
struct RemoveReference<T&> { typedef T type; };  // NOLINT

// A handy wrapper around RemoveReference that works when the argument
// T depends on template parameters.
#define GTEST_REMOVE_REFERENCE_(T) \
    typename ::testing::internal::RemoveReference<T>::type

// Removes const from a type if it is a const type, otherwise leaves
// it unchanged.  This is the same as tr1::remove_const, which is not
// widely available yet.
template <typename T>
struct RemoveConst { typedef T type; };  // NOLINT
template <typename T>
struct RemoveConst<const T> { typedef T type; };  // NOLINT

// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
// definition to fail to remove the const in 'const int[3]' and 'const
// char[3][4]'.  The following specialization works around the bug.
template <typename T, size_t N>
struct RemoveConst<const T[N]> {
  typedef typename RemoveConst<T>::type type[N];
};

#if defined(_MSC_VER) && _MSC_VER < 1400
// This is the only specialization that allows VC++ 7.1 to remove const in
// 'const int[3]' and 'const int[3][4]'.  However, it causes trouble with GCC
// and thus needs to be conditionally compiled.
template <typename T, size_t N>
struct RemoveConst<T[N]> {
  typedef typename RemoveConst<T>::type type[N];
};
#endif

// A handy wrapper around RemoveConst that works when the argument
// T depends on template parameters.
#define GTEST_REMOVE_CONST_(T) \
    typename ::testing::internal::RemoveConst<T>::type

// Turns const U&, U&, const U, and U all into U.
#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
    GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))

// Adds reference to a type if it is not a reference type,
// otherwise leaves it unchanged.  This is the same as
// tr1::add_reference, which is not widely available yet.
template <typename T>
struct AddReference { typedef T& type; };  // NOLINT
template <typename T>
struct AddReference<T&> { typedef T& type; };  // NOLINT

// A handy wrapper around AddReference that works when the argument T
// depends on template parameters.
#define GTEST_ADD_REFERENCE_(T) \
    typename ::testing::internal::AddReference<T>::type

// Adds a reference to const on top of T as necessary.  For example,
// it transforms
//
//   char         ==> const char&
//   const char   ==> const char&
//   char&        ==> const char&
//   const char&  ==> const char&
//
// The argument T must depend on some template parameters.
#define GTEST_REFERENCE_TO_CONST_(T) \
    GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))

// ImplicitlyConvertible<From, To>::value is a compile-time bool
// constant that's true iff type From can be implicitly converted to
// type To.
template <typename From, typename To>
class ImplicitlyConvertible {
 private:
  // We need the following helper functions only for their types.
  // They have no implementations.

  // MakeFrom() is an expression whose type is From.  
We cannot simply
  // use From(), as the type From may not have a public default
  // constructor.
  static typename AddReference<From>::type MakeFrom();

  // These two functions are overloaded.  Given an expression
  // Helper(x), the compiler will pick the first version if x can be
  // implicitly converted to type To; otherwise it will pick the
  // second version.
  //
  // The first version returns a value of size 1, and the second
  // version returns a value of size 2.  Therefore, by checking the
  // size of Helper(x), which can be done at compile time, we can tell
  // which version of Helper() is used, and hence whether x can be
  // implicitly converted to type To.
  static char Helper(To);
  static char (&Helper(...))[2];  // NOLINT

  // We have to put the 'public' section after the 'private' section,
  // or MSVC refuses to compile the code.
 public:
#if defined(__BORLANDC__)
  // C++Builder cannot use member overload resolution during template
  // instantiation.  The simplest workaround is to use its C++0x type traits
  // functions (C++Builder 2009 and above only).
  static const bool value = __is_convertible(From, To);
#else
  // MSVC warns about implicitly converting from double to int for
  // possible loss of data, so we need to temporarily disable the
  // warning.
  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244)
  static const bool value =
      sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
  GTEST_DISABLE_MSC_WARNINGS_POP_()
#endif  // __BORLANDC__
};
template <typename From, typename To>
const bool ImplicitlyConvertible<From, To>::value;

// IsAProtocolMessage<T>::value is a compile-time bool constant that's
// true iff T is type ProtocolMessage, proto2::Message, or a subclass
// of those.
template <typename T>
struct IsAProtocolMessage
    : public bool_constant<
  ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
  ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
};

// When the compiler sees expression IsContainerTest<C>(0), if C is an
// STL-style container class, the first overload of IsContainerTest
// will be viable (since both C::iterator* and C::const_iterator* are
// valid types and NULL can be implicitly converted to them).  It will
// be picked over the second overload as 'int' is a perfect match for
// the type of argument 0.  If C::iterator or C::const_iterator is not
// a valid type, the first overload is not viable, and the second
// overload will be picked.  Therefore, we can determine whether C is
// a container class by checking the type of IsContainerTest<C>(0).
// The value of the expression is insignificant.
//
// Note that we look for both C::iterator and C::const_iterator.  The
// reason is that C++ injects the name of a class as a member of the
// class itself (e.g. you can refer to class iterator as either
// 'iterator' or 'iterator::iterator').  If we look for C::iterator
// only, for example, we would mistakenly think that a class named
// iterator is an STL container.
//
// Also note that the simpler approach of overloading
// IsContainerTest(typename C::const_iterator*) and
// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
typedef int IsContainer;
template <class C>
IsContainer IsContainerTest(int /* dummy */,
                            typename C::iterator* /* it */ = NULL,
                            typename C::const_iterator* /* const_it */ = NULL) {
  return 0;
}

typedef char IsNotContainer;
template <class C>
IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }

// EnableIf<Cond>::type is void when 'Cond' is true, and
// undefined when 'Cond' is false.  To use SFINAE to make a function
// overload only apply when a particular expression is true, add
// "typename EnableIf<expression>::type* = 0" as the last parameter.
template<bool> struct EnableIf;
template<> struct EnableIf<true> { typedef void type; };  // NOLINT

// Utilities for native arrays.

// ArrayEq() compares two k-dimensional native arrays using the
// elements' operator==, where k can be any integer >= 0.  When k is
// 0, ArrayEq() degenerates into comparing a single pair of values.

template <typename T, typename U>
bool ArrayEq(const T* lhs, size_t size, const U* rhs);

// This generic version is used when k is 0.
template <typename T, typename U>
inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }

// This overload is used when k >= 1.
template <typename T, typename U, size_t N>
inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
  return internal::ArrayEq(lhs, N, rhs);
}

// This helper reduces code bloat.  
If we instead put its logic inside
// the previous ArrayEq() function, arrays with different sizes would
// lead to different copies of the template code.
template <typename T, typename U>
bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
  for (size_t i = 0; i != size; i++) {
    if (!internal::ArrayEq(lhs[i], rhs[i]))
      return false;
  }
  return true;
}

// Finds the first element in the iterator range [begin, end) that
// equals elem.  Element may be a native array type itself.
template <typename Iter, typename Element>
Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
  for (Iter it = begin; it != end; ++it) {
    if (internal::ArrayEq(*it, elem))
      return it;
  }
  return end;
}

// CopyArray() copies a k-dimensional native array using the elements'
// operator=, where k can be any integer >= 0.  When k is 0,
// CopyArray() degenerates into copying a single value.

template <typename T, typename U>
void CopyArray(const T* from, size_t size, U* to);

// This generic version is used when k is 0.
template <typename T, typename U>
inline void CopyArray(const T& from, U* to) { *to = from; }

// This overload is used when k >= 1.
template <typename T, typename U, size_t N>
inline void CopyArray(const T(&from)[N], U(*to)[N]) {
  internal::CopyArray(from, N, *to);
}

// This helper reduces code bloat.  If we instead put its logic inside
// the previous CopyArray() function, arrays with different sizes
// would lead to different copies of the template code.
template <typename T, typename U>
void CopyArray(const T* from, size_t size, U* to) {
  for (size_t i = 0; i != size; i++) {
    internal::CopyArray(from[i], to + i);
  }
}

// The relation between a NativeArray object (see below) and the
// native array it represents.
// We use 2 different structs to allow non-copyable types to be used, as long
// as RelationToSourceReference() is passed.
struct RelationToSourceReference {};
struct RelationToSourceCopy {};

// Adapts a native array to a read-only STL-style container.  Instead
// of the complete STL container concept, this adaptor only implements
// members useful for Google Mock's container matchers.  New members
// should be added as needed.  To simplify the implementation, we only
// support Element being a raw type (i.e. having no top-level const or
// reference modifier).  It's the client's responsibility to satisfy
// this requirement.  Element can be an array type itself (hence
// multi-dimensional arrays are supported).
template <typename Element>
class NativeArray {
 public:
  // STL-style container typedefs.
  typedef Element value_type;
  typedef Element* iterator;
  typedef const Element* const_iterator;

  // Constructs from a native array. References the source.
  NativeArray(const Element* array, size_t count, RelationToSourceReference) {
    InitRef(array, count);
  }

  // Constructs from a native array. Copies the source.
  NativeArray(const Element* array, size_t count, RelationToSourceCopy) {
    InitCopy(array, count);
  }

  // Copy constructor.
  NativeArray(const NativeArray& rhs) {
    (this->*rhs.clone_)(rhs.array_, rhs.size_);
  }

  ~NativeArray() {
    if (clone_ != &NativeArray::InitRef)
      delete[] array_;
  }

  // STL-style container methods.
  size_t size() const { return size_; }
  const_iterator begin() const { return array_; }
  const_iterator end() const { return array_ + size_; }
  bool operator==(const NativeArray& rhs) const {
    return size() == rhs.size() &&
        ArrayEq(begin(), size(), rhs.begin());
  }

 private:
  enum {
    kCheckTypeIsNotConstOrAReference = StaticAssertTypeEqHelper<
        Element, GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>::value,
  };

  // Initializes this object with a copy of the input.
  void InitCopy(const Element* array, size_t a_size) {
    Element* const copy = new Element[a_size];
    CopyArray(array, a_size, copy);
    array_ = copy;
    size_ = a_size;
    clone_ = &NativeArray::InitCopy;
  }

  // Initializes this object with a reference of the input.
  void InitRef(const Element* array, size_t a_size) {
    array_ = array;
    size_ = a_size;
    clone_ = &NativeArray::InitRef;
  }

  const Element* array_;
  size_t size_;
  void (NativeArray::*clone_)(const Element*, size_t);

  GTEST_DISALLOW_ASSIGN_(NativeArray);
};

}  // namespace internal
}  // namespace testing

#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
  ::testing::internal::AssertHelper(result_type, file, line, message) \
    = ::testing::Message()

#define GTEST_MESSAGE_(message, result_type) \
  GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)

#define GTEST_FATAL_FAILURE_(message) \
  return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)

#define GTEST_NONFATAL_FAILURE_(message) \
  GTEST_MESSAGE_(message, 
::testing::TestPartResult::kNonFatalFailure)

#define GTEST_SUCCESS_(message) \
  GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)

// Suppresses MSVC warnings 4702 (unreachable code) for the code following
// statement if it returns or throws (or doesn't return or throw in some
// situations).
#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
  if (::testing::internal::AlwaysTrue()) { statement; }

#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::ConstCharPtr gtest_msg = "") { \
    bool gtest_caught_expected = false; \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    catch (expected_exception const&) { \
      gtest_caught_expected = true; \
    } \
    catch (...) { \
      gtest_msg.value = \
          "Expected: " #statement " throws an exception of type " \
          #expected_exception ".\n  Actual: it throws a different type."; \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
    } \
    if (!gtest_caught_expected) { \
      gtest_msg.value = \
          "Expected: " #statement " throws an exception of type " \
          #expected_exception ".\n  Actual: it throws nothing."; \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
    } \
  } else \
    GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
      fail(gtest_msg.value)

#define GTEST_TEST_NO_THROW_(statement, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    catch (...) { \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
    } \
  } else \
    GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
      fail("Expected: " #statement " doesn't throw an exception.\n" \
           "  Actual: it throws.")

#define GTEST_TEST_ANY_THROW_(statement, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    bool gtest_caught_any = false; \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    catch (...) { \
      gtest_caught_any = true; \
    } \
    if (!gtest_caught_any) { \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
    } \
  } else \
    GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
      fail("Expected: " #statement " throws an exception.\n" \
           "  Actual: it doesn't.")


// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
// either a boolean expression or an AssertionResult. text is a textual
// representation of expression as it was passed into the EXPECT_TRUE.
#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (const ::testing::AssertionResult gtest_ar_ = \
      ::testing::AssertionResult(expression)) \
    ; \
  else \
    fail(::testing::internal::GetBoolAssertionFailureMessage(\
        gtest_ar_, text, #actual, #expected).c_str())

#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
    } \
  } else \
    GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
      fail("Expected: " #statement " doesn't generate new fatal " \
           "failures in the current thread.\n" \
           "  Actual: it does.")

// Expands to the name of the class that implements the given test.
#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
  test_case_name##_##test_name##_Test

// Helper macro for defining tests.
#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
 public:\
  GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
 private:\
  virtual void TestBody();\
  static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
  GTEST_DISALLOW_COPY_AND_ASSIGN_(\
      GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
};\
\
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
  ::test_info_ =\
    ::testing::internal::MakeAndRegisterTestInfo(\
        #test_case_name, #test_name, NULL, NULL, \
        ::testing::internal::CodeLocation(__FILE__, __LINE__), \
        (parent_id), \
        parent_class::SetUpTestCase, \
        parent_class::TearDownTestCase, \
        new ::testing::internal::TestFactoryImpl<\
            GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()

#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_

// Copyright 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file defines the public API for death tests.  It is\n// #included by gtest.h so a user doesn't need to include this\n// directly.\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_\n#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_\n\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)\n//\n// The Google C++ Testing Framework (Google Test)\n//\n// This header file defines internal utilities needed for implementing\n// death tests.  They are subject to change without notice.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_\n\n\n#include <stdio.h>\n\nnamespace testing {\nnamespace internal {\n\nGTEST_DECLARE_string_(internal_run_death_test);\n\n// Names of the flags (needed for parsing Google Test flags).\nconst char kDeathTestStyleFlag[] = \"death_test_style\";\nconst char kDeathTestUseFork[] = \"death_test_use_fork\";\nconst char kInternalRunDeathTestFlag[] = \"internal_run_death_test\";\n\n#if GTEST_HAS_DEATH_TEST\n\n// DeathTest is a class that hides much of the complexity of the\n// GTEST_DEATH_TEST_ macro.  
It is abstract; its static Create method\n// returns a concrete class that depends on the prevailing death test\n// style, as defined by the --gtest_death_test_style and/or\n// --gtest_internal_run_death_test flags.\n\n// In describing the results of death tests, these terms are used with\n// the corresponding definitions:\n//\n// exit status:  The integer exit information in the format specified\n//               by wait(2)\n// exit code:    The integer code passed to exit(3), _exit(2), or\n//               returned from main()\nclass GTEST_API_ DeathTest {\n public:\n  // Create returns false if there was an error determining the\n  // appropriate action to take for the current death test; for example,\n  // if the gtest_death_test_style flag is set to an invalid value.\n  // The LastMessage method will return a more detailed message in that\n  // case.  Otherwise, the DeathTest pointer pointed to by the \"test\"\n  // argument is set.  If the death test should be skipped, the pointer\n  // is set to NULL; otherwise, it is set to the address of a new concrete\n  // DeathTest object that controls the execution of the current test.\n  static bool Create(const char* statement, const RE* regex,\n                     const char* file, int line, DeathTest** test);\n  DeathTest();\n  virtual ~DeathTest() { }\n\n  // A helper class that aborts a death test when it's deleted.\n  class ReturnSentinel {\n   public:\n    explicit ReturnSentinel(DeathTest* test) : test_(test) { }\n    ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }\n   private:\n    DeathTest* const test_;\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);\n  } GTEST_ATTRIBUTE_UNUSED_;\n\n  // An enumeration of possible roles that may be taken when a death\n  // test is encountered.  EXECUTE means that the death test logic should\n  // be executed immediately.  
OVERSEE means that the program should prepare\n  // the appropriate environment for a child process to execute the death\n  // test, then wait for it to complete.\n  enum TestRole { OVERSEE_TEST, EXECUTE_TEST };\n\n  // An enumeration of the three reasons that a test might be aborted.\n  enum AbortReason {\n    TEST_ENCOUNTERED_RETURN_STATEMENT,\n    TEST_THREW_EXCEPTION,\n    TEST_DID_NOT_DIE\n  };\n\n  // Assumes one of the above roles.\n  virtual TestRole AssumeRole() = 0;\n\n  // Waits for the death test to finish and returns its status.\n  virtual int Wait() = 0;\n\n  // Returns true if the death test passed; that is, the test process\n  // exited during the test, its exit status matches a user-supplied\n  // predicate, and its stderr output matches a user-supplied regular\n  // expression.\n  // The user-supplied predicate may be a macro expression rather\n  // than a function pointer or functor, or else Wait and Passed could\n  // be combined.\n  virtual bool Passed(bool exit_status_ok) = 0;\n\n  // Signals that the death test did not die as expected.\n  virtual void Abort(AbortReason reason) = 0;\n\n  // Returns a human-readable outcome message regarding the outcome of\n  // the last death test.\n  static const char* LastMessage();\n\n  static void set_last_death_test_message(const std::string& message);\n\n private:\n  // A string containing a description of the outcome of the last death test.\n  static std::string last_death_test_message_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);\n};\n\n// Factory interface for death tests.  
May be mocked out for testing.\nclass DeathTestFactory {\n public:\n  virtual ~DeathTestFactory() { }\n  virtual bool Create(const char* statement, const RE* regex,\n                      const char* file, int line, DeathTest** test) = 0;\n};\n\n// A concrete DeathTestFactory implementation for normal use.\nclass DefaultDeathTestFactory : public DeathTestFactory {\n public:\n  virtual bool Create(const char* statement, const RE* regex,\n                      const char* file, int line, DeathTest** test);\n};\n\n// Returns true if exit_status describes a process that was terminated\n// by a signal, or exited normally with a nonzero exit code.\nGTEST_API_ bool ExitedUnsuccessfully(int exit_status);\n\n// Traps C++ exceptions escaping statement and reports them as test\n// failures. Note that trapping SEH exceptions is not implemented here.\n# if GTEST_HAS_EXCEPTIONS\n#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \\\n  try { \\\n    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \\\n  } catch (const ::std::exception& gtest_exception) { \\\n    fprintf(\\\n        stderr, \\\n        \"\\n%s: Caught std::exception-derived exception escaping the \" \\\n        \"death test statement. Exception message: %s\\n\", \\\n        ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \\\n        gtest_exception.what()); \\\n    fflush(stderr); \\\n    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \\\n  } catch (...) 
{ \\\n    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \\\n  }\n\n# else\n#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \\\n  GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)\n\n# endif\n\n// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,\n// ASSERT_EXIT*, and EXPECT_EXIT*.\n# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \\\n  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\\n  if (::testing::internal::AlwaysTrue()) { \\\n    const ::testing::internal::RE& gtest_regex = (regex); \\\n    ::testing::internal::DeathTest* gtest_dt; \\\n    if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \\\n        __FILE__, __LINE__, &gtest_dt)) { \\\n      goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \\\n    } \\\n    if (gtest_dt != NULL) { \\\n      ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \\\n          gtest_dt_ptr(gtest_dt); \\\n      switch (gtest_dt->AssumeRole()) { \\\n        case ::testing::internal::DeathTest::OVERSEE_TEST: \\\n          if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \\\n            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \\\n          } \\\n          break; \\\n        case ::testing::internal::DeathTest::EXECUTE_TEST: { \\\n          ::testing::internal::DeathTest::ReturnSentinel \\\n              gtest_sentinel(gtest_dt); \\\n          GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \\\n          gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \\\n          break; \\\n        } \\\n        default: \\\n          break; \\\n      } \\\n    } \\\n  } else \\\n    GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \\\n      fail(::testing::internal::DeathTest::LastMessage())\n// The symbol \"fail\" here expands to something into which a message\n// can be streamed.\n\n// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in\n// NDEBUG mode. 
In this case we need the statements to be executed, the regex is\n// ignored, and the macro must accept a streamed message even though the message\n// is never printed.\n# define GTEST_EXECUTE_STATEMENT_(statement, regex) \\\n  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\\n  if (::testing::internal::AlwaysTrue()) { \\\n     GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \\\n  } else \\\n    ::testing::Message()\n\n// A class representing the parsed contents of the\n// --gtest_internal_run_death_test flag, as it existed when\n// RUN_ALL_TESTS was called.\nclass InternalRunDeathTestFlag {\n public:\n  InternalRunDeathTestFlag(const std::string& a_file,\n                           int a_line,\n                           int an_index,\n                           int a_write_fd)\n      : file_(a_file), line_(a_line), index_(an_index),\n        write_fd_(a_write_fd) {}\n\n  ~InternalRunDeathTestFlag() {\n    if (write_fd_ >= 0)\n      posix::Close(write_fd_);\n  }\n\n  const std::string& file() const { return file_; }\n  int line() const { return line_; }\n  int index() const { return index_; }\n  int write_fd() const { return write_fd_; }\n\n private:\n  std::string file_;\n  int line_;\n  int index_;\n  int write_fd_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);\n};\n\n// Returns a newly created InternalRunDeathTestFlag object with fields\n// initialized from the GTEST_FLAG(internal_run_death_test) flag if\n// the flag is specified; otherwise returns NULL.\nInternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();\n\n#else  // GTEST_HAS_DEATH_TEST\n\n// This macro is used for implementing macros such as\n// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where\n// death tests are not supported. Those macros must compile on such systems\n// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on\n// systems that support death tests. 
This allows one to write such a macro\n// on a system that does not support death tests and be sure that it will\n// compile on a death-test supporting system.\n//\n// Parameters:\n//   statement -  A statement that a macro such as EXPECT_DEATH would test\n//                for program termination. This macro has to make sure this\n//                statement is compiled but not executed, to ensure that\n//                EXPECT_DEATH_IF_SUPPORTED compiles with a certain\n//                parameter iff EXPECT_DEATH compiles with it.\n//   regex     -  A regex that a macro such as EXPECT_DEATH would use to test\n//                the output of statement.  This parameter has to be\n//                compiled but not evaluated by this macro, to ensure that\n//                this macro only accepts expressions that a macro such as\n//                EXPECT_DEATH would accept.\n//   terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED\n//                and a return statement for ASSERT_DEATH_IF_SUPPORTED.\n//                This ensures that ASSERT_DEATH_IF_SUPPORTED will not\n//                compile inside functions where ASSERT_DEATH doesn't\n//                compile.\n//\n//  The branch that has an always false condition is used to ensure that\n//  statement and regex are compiled (and thus syntactically correct) but\n//  never executed. The unreachable code macro protects the terminator\n//  statement from generating an 'unreachable code' warning in case\n//  statement unconditionally returns or throws. 
The Message constructor at\n//  the end allows the syntax of streaming additional messages into the\n//  macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH.\n# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \\\n    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\\n    if (::testing::internal::AlwaysTrue()) { \\\n      GTEST_LOG_(WARNING) \\\n          << \"Death tests are not supported on this platform.\\n\" \\\n          << \"Statement '\" #statement \"' cannot be verified.\"; \\\n    } else if (::testing::internal::AlwaysFalse()) { \\\n      ::testing::internal::RE::PartialMatch(\".*\", (regex)); \\\n      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \\\n      terminator; \\\n    } else \\\n      ::testing::Message()\n\n#endif  // GTEST_HAS_DEATH_TEST\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_\n\nnamespace testing {\n\n// This flag controls the style of death tests.  Valid values are \"threadsafe\",\n// meaning that the death test child process will re-execute the test binary\n// from the start, running only a single death test, or \"fast\",\n// meaning that the child process will execute the test logic immediately\n// after forking.\nGTEST_DECLARE_string_(death_test_style);\n\n#if GTEST_HAS_DEATH_TEST\n\nnamespace internal {\n\n// Returns a Boolean value indicating whether the caller is currently\n// executing in the context of the death test child process.  Tools such as\n// Valgrind heap checkers may need this to modify their behavior in death\n// tests.  IMPORTANT: This is an internal utility.  Using it may break the\n// implementation of death tests.  User code MUST NOT use it.\nGTEST_API_ bool InDeathTestChild();\n\n}  // namespace internal\n\n// The following macros are useful for writing death tests.\n\n// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is\n// executed:\n//\n//   1. 
It generates a warning if there is more than one active\n//   thread.  This is because it's safe to fork() or clone() only\n//   when there is a single thread.\n//\n//   2. The parent process clone()s a sub-process and runs the death\n//   test in it; the sub-process exits with code 0 at the end of the\n//   death test, if it hasn't exited already.\n//\n//   3. The parent process waits for the sub-process to terminate.\n//\n//   4. The parent process checks the exit code and error message of\n//   the sub-process.\n//\n// Examples:\n//\n//   ASSERT_DEATH(server.SendMessage(56, \"Hello\"), \"Invalid port number\");\n//   for (int i = 0; i < 5; i++) {\n//     EXPECT_DEATH(server.ProcessRequest(i),\n//                  \"Invalid request .* in ProcessRequest()\")\n//                  << \"Failed to die on request \" << i;\n//   }\n//\n//   ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), \"Exiting\");\n//\n//   bool KilledBySIGHUP(int exit_code) {\n//     return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;\n//   }\n//\n//   ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, \"Hanging up!\");\n//\n// On the regular expressions used in death tests:\n//\n//   On POSIX-compliant systems (*nix), we use the <regex.h> library,\n//   which uses the POSIX extended regex syntax.\n//\n//   On other platforms (e.g. Windows), we only support a simple regex\n//   syntax implemented as part of Google Test.  This limited\n//   implementation should be enough most of the time when writing\n//   death tests; though it lacks many features you can find in PCRE\n//   or POSIX extended regex syntax.  For example, we don't support\n//   union (\"x|y\"), grouping (\"(xy)\"), brackets (\"[xy]\"), and\n//   repetition count (\"x{5,7}\"), among others.\n//\n//   Below is the syntax that we do support.  We chose it to be a\n//   subset of both PCRE and POSIX extended regex, so it's easy to\n//   learn wherever you come from.  
In the following: 'A' denotes a\n//   literal character, period (.), or a single \\\\ escape sequence;\n//   'x' and 'y' denote regular expressions; 'm' and 'n' are for\n//   natural numbers.\n//\n//     c     matches any literal character c\n//     \\\\d   matches any decimal digit\n//     \\\\D   matches any character that's not a decimal digit\n//     \\\\f   matches \\f\n//     \\\\n   matches \\n\n//     \\\\r   matches \\r\n//     \\\\s   matches any ASCII whitespace, including \\n\n//     \\\\S   matches any character that's not a whitespace\n//     \\\\t   matches \\t\n//     \\\\v   matches \\v\n//     \\\\w   matches any letter, _, or decimal digit\n//     \\\\W   matches any character that \\\\w doesn't match\n//     \\\\c   matches any literal character c, which must be a punctuation\n//     .     matches any single character except \\n\n//     A?    matches 0 or 1 occurrences of A\n//     A*    matches 0 or many occurrences of A\n//     A+    matches 1 or many occurrences of A\n//     ^     matches the beginning of a string (not that of each line)\n//     $     matches the end of a string (not that of each line)\n//     xy    matches x followed by y\n//\n//   If you accidentally use PCRE or POSIX extended regex features\n//   not implemented by us, you will get a run-time failure.  In that\n//   case, please try to rewrite your regular expression within the\n//   above syntax.\n//\n//   This implementation is *not* meant to be as highly tuned or robust\n//   as a compiled regex library, but should perform well enough for a\n//   death test, which already incurs significant overhead by launching\n//   a child process.\n//\n// Known caveats:\n//\n//   A \"threadsafe\" style death test obtains the path to the test\n//   program from argv[0] and re-executes it in the sub-process.  For\n//   simplicity, the current implementation doesn't search the PATH\n//   when launching the sub-process.  
This means that the user must\n//   invoke the test program via a path that contains at least one\n//   path separator (e.g. path/to/foo_test and\n//   /absolute/path/to/bar_test are fine, but foo_test is not).  This\n//   is rarely a problem as people usually don't put the test binary\n//   directory in PATH.\n//\n// TODO(wan@google.com): make thread-safe death tests search the PATH.\n\n// Asserts that a given statement causes the program to exit, with an\n// integer exit status that satisfies predicate, and emitting error output\n// that matches regex.\n# define ASSERT_EXIT(statement, predicate, regex) \\\n    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)\n\n// Like ASSERT_EXIT, but continues on to successive tests in the\n// test case, if any:\n# define EXPECT_EXIT(statement, predicate, regex) \\\n    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)\n\n// Asserts that a given statement causes the program to exit, either by\n// explicitly exiting with a nonzero exit code or being killed by a\n// signal, and emitting error output that matches regex.\n# define ASSERT_DEATH(statement, regex) \\\n    ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)\n\n// Like ASSERT_DEATH, but continues on to successive tests in the\n// test case, if any:\n# define EXPECT_DEATH(statement, regex) \\\n    EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)\n\n// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:\n\n// Tests that an exit code describes a normal exit with a given exit code.\nclass GTEST_API_ ExitedWithCode {\n public:\n  explicit ExitedWithCode(int exit_code);\n  bool operator()(int exit_status) const;\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ExitedWithCode& other);\n\n  const int exit_code_;\n};\n\n# if !GTEST_OS_WINDOWS\n// Tests that an exit code describes an exit due to termination by a\n// given signal.\nclass 
GTEST_API_ KilledBySignal {\n public:\n  explicit KilledBySignal(int signum);\n  bool operator()(int exit_status) const;\n private:\n  const int signum_;\n};\n# endif  // !GTEST_OS_WINDOWS\n\n// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.\n// The death testing framework causes this to have interesting semantics,\n// since the sideeffects of the call are only visible in opt mode, and not\n// in debug mode.\n//\n// In practice, this can be used to test functions that utilize the\n// LOG(DFATAL) macro using the following style:\n//\n// int DieInDebugOr12(int* sideeffect) {\n//   if (sideeffect) {\n//     *sideeffect = 12;\n//   }\n//   LOG(DFATAL) << \"death\";\n//   return 12;\n// }\n//\n// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {\n//   int sideeffect = 0;\n//   // Only asserts in dbg.\n//   EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), \"death\");\n//\n// #ifdef NDEBUG\n//   // opt-mode has sideeffect visible.\n//   EXPECT_EQ(12, sideeffect);\n// #else\n//   // dbg-mode no visible sideeffect.\n//   EXPECT_EQ(0, sideeffect);\n// #endif\n// }\n//\n// This will assert that DieInDebugReturn12InOpt() crashes in debug\n// mode, usually due to a DCHECK or LOG(DFATAL), but returns the\n// appropriate fallback value (12 in this case) in opt mode. If you\n// need to test that a function has appropriate side-effects in opt\n// mode, include assertions against the side-effects.  
A general\n// pattern for this is:\n//\n// EXPECT_DEBUG_DEATH({\n//   // Side-effects here will have an effect after this statement in\n//   // opt mode, but none in debug mode.\n//   EXPECT_EQ(12, DieInDebugOr12(&sideeffect));\n// }, \"death\");\n//\n# ifdef NDEBUG\n\n#  define EXPECT_DEBUG_DEATH(statement, regex) \\\n  GTEST_EXECUTE_STATEMENT_(statement, regex)\n\n#  define ASSERT_DEBUG_DEATH(statement, regex) \\\n  GTEST_EXECUTE_STATEMENT_(statement, regex)\n\n# else\n\n#  define EXPECT_DEBUG_DEATH(statement, regex) \\\n  EXPECT_DEATH(statement, regex)\n\n#  define ASSERT_DEBUG_DEATH(statement, regex) \\\n  ASSERT_DEATH(statement, regex)\n\n# endif  // NDEBUG for EXPECT_DEBUG_DEATH\n#endif  // GTEST_HAS_DEATH_TEST\n\n// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and\n// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if\n// death tests are supported; otherwise they just issue a warning.  This is\n// useful when you are combining death test assertions with normal test\n// assertions in one test.\n#if GTEST_HAS_DEATH_TEST\n# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \\\n    EXPECT_DEATH(statement, regex)\n# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \\\n    ASSERT_DEATH(statement, regex)\n#else\n# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \\\n    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )\n# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \\\n    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)\n#endif\n\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_\n// This file was GENERATED by command:\n//     pump.py gtest-param-test.h.pump\n// DO NOT EDIT BY HAND!!!\n\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list 
of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: vladl@google.com (Vlad Losev)\n//\n// Macros and functions for implementing parameterized tests\n// in Google C++ Testing Framework (Google Test)\n//\n// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!\n//\n#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_\n#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_\n\n\n// Value-parameterized tests allow you to test your code with different\n// parameters without writing multiple copies of the same test.\n//\n// Here is how you use value-parameterized tests:\n\n#if 0\n\n// To write value-parameterized tests, first you should define a fixture\n// class. 
It is usually derived from testing::TestWithParam<T> (see below for\n// another inheritance scheme that's sometimes useful in more complicated\n// class hierarchies), where the type of your parameter values.\n// TestWithParam<T> is itself derived from testing::Test. T can be any\n// copyable type. If it's a raw pointer, you are responsible for managing the\n// lifespan of the pointed values.\n\nclass FooTest : public ::testing::TestWithParam<const char*> {\n  // You can implement all the usual class fixture members here.\n};\n\n// Then, use the TEST_P macro to define as many parameterized tests\n// for this fixture as you want. The _P suffix is for \"parameterized\"\n// or \"pattern\", whichever you prefer to think.\n\nTEST_P(FooTest, DoesBlah) {\n  // Inside a test, access the test parameter with the GetParam() method\n  // of the TestWithParam<T> class:\n  EXPECT_TRUE(foo.Blah(GetParam()));\n  ...\n}\n\nTEST_P(FooTest, HasBlahBlah) {\n  ...\n}\n\n// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test\n// case with any set of parameters you want. Google Test defines a number\n// of functions for generating test parameters. They return what we call\n// (surprise!) parameter generators. Here is a  summary of them, which\n// are all in the testing namespace:\n//\n//\n//  Range(begin, end [, step]) - Yields values {begin, begin+step,\n//                               begin+step+step, ...}. The values do not\n//                               include end. 
step defaults to 1.\n//  Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.\n//  ValuesIn(container)        - Yields values from a C-style array, an STL\n//  ValuesIn(begin,end)          container, or an iterator range [begin, end).\n//  Bool()                     - Yields sequence {false, true}.\n//  Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian product\n//                               for the math savvy) of the values generated\n//                               by the N generators.\n//\n// For more details, see comments at the definitions of these functions below\n// in this file.\n//\n// The following statement will instantiate tests from the FooTest test case\n// each with parameter values \"meeny\", \"miny\", and \"moe\".\n\nINSTANTIATE_TEST_CASE_P(InstantiationName,\n                        FooTest,\n                        Values(\"meeny\", \"miny\", \"moe\"));\n\n// To distinguish different instances of the pattern, (yes, you\n// can instantiate it more then once) the first argument to the\n// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the\n// actual test case name. Remember to pick unique prefixes for different\n// instantiations. 
The tests from the instantiation above will have\n// these names:\n//\n//    * InstantiationName/FooTest.DoesBlah/0 for \"meeny\"\n//    * InstantiationName/FooTest.DoesBlah/1 for \"miny\"\n//    * InstantiationName/FooTest.DoesBlah/2 for \"moe\"\n//    * InstantiationName/FooTest.HasBlahBlah/0 for \"meeny\"\n//    * InstantiationName/FooTest.HasBlahBlah/1 for \"miny\"\n//    * InstantiationName/FooTest.HasBlahBlah/2 for \"moe\"\n//\n// You can use these names in --gtest_filter.\n//\n// This statement will instantiate all tests from FooTest again, each\n// with parameter values \"cat\" and \"dog\":\n\nconst char* pets[] = {\"cat\", \"dog\"};\nINSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));\n\n// The tests from the instantiation above will have these names:\n//\n//    * AnotherInstantiationName/FooTest.DoesBlah/0 for \"cat\"\n//    * AnotherInstantiationName/FooTest.DoesBlah/1 for \"dog\"\n//    * AnotherInstantiationName/FooTest.HasBlahBlah/0 for \"cat\"\n//    * AnotherInstantiationName/FooTest.HasBlahBlah/1 for \"dog\"\n//\n// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests\n// in the given test case, whether their definitions come before or\n// AFTER the INSTANTIATE_TEST_CASE_P statement.\n//\n// Please also note that generator expressions (including parameters to the\n// generators) are evaluated in InitGoogleTest(), after main() has started.\n// This allows the user on one hand, to adjust generator parameters in order\n// to dynamically determine a set of tests to run and on the other hand,\n// give the user a chance to inspect the generated tests with Google Test\n// reflection API before RUN_ALL_TESTS() is executed.\n//\n// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc\n// for more examples.\n//\n// In the future, we plan to publish the API for defining new parameter\n// generators. 
But for now this interface remains part of the internal\n// implementation and is subject to change.\n//\n//\n// A parameterized test fixture must be derived from testing::Test and from\n// testing::WithParamInterface<T>, where T is the type of the parameter\n// values. Inheriting from TestWithParam<T> satisfies that requirement because\n// TestWithParam<T> inherits from both Test and WithParamInterface. In more\n// complicated hierarchies, however, it is occasionally useful to inherit\n// separately from Test and WithParamInterface. For example:\n\nclass BaseTest : public ::testing::Test {\n  // You can inherit all the usual members for a non-parameterized test\n  // fixture here.\n};\n\nclass DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {\n  // The usual test fixture members go here too.\n};\n\nTEST_F(BaseTest, HasFoo) {\n  // This is an ordinary non-parameterized test.\n}\n\nTEST_P(DerivedTest, DoesBlah) {\n  // GetParam works just the same here as if you inherit from TestWithParam.\n  EXPECT_TRUE(foo.Blah(GetParam()));\n}\n\n#endif  // 0\n\n\n#if !GTEST_OS_SYMBIAN\n# include <utility>\n#endif\n\n// scripts/fuse_gtest.py depends on gtest's own header being #included\n// *unconditionally*.  Therefore these #includes cannot be moved\n// inside #if GTEST_HAS_PARAM_TEST.\n// Copyright 2008 Google Inc.\n// All Rights Reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: vladl@google.com (Vlad Losev)\n\n// Type and function utilities for implementing parameterized tests.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_\n\n#include <ctype.h>\n\n#include <iterator>\n#include <set>\n#include <utility>\n#include <vector>\n\n// scripts/fuse_gtest.py depends on gtest's own header being #included\n// *unconditionally*.  
Therefore these #includes cannot be moved\n// inside #if GTEST_HAS_PARAM_TEST.\n// Copyright 2003 Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: Dan Egnor (egnor@google.com)\n//\n// A \"smart\" pointer type with reference tracking.  Every pointer to a\n// particular object is kept on a circular linked list.  
When the last pointer\n// to an object is destroyed or reassigned, the object is deleted.\n//\n// Used properly, this deletes the object when the last reference goes away.\n// There are several caveats:\n// - Like all reference counting schemes, cycles lead to leaks.\n// - Each smart pointer is actually two pointers (8 bytes instead of 4).\n// - Every time a pointer is assigned, the entire list of pointers to that\n//   object is traversed.  This class is therefore NOT SUITABLE when there\n//   will often be more than two or three pointers to a particular object.\n// - References are only tracked as long as linked_ptr<> objects are copied.\n//   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS\n//   will happen (double deletion).\n//\n// A good use of this class is storing object references in STL containers.\n// You can safely put linked_ptr<> in a vector<>.\n// Other uses may not be as good.\n//\n// Note: If you use an incomplete type with linked_ptr<>, the class\n// *containing* linked_ptr<> must have a constructor and destructor (even\n// if they do nothing!).\n//\n// Bill Gibbons suggested we use something like this.\n//\n// Thread Safety:\n//   Unlike other linked_ptr implementations, in this implementation\n//   a linked_ptr object is thread-safe in the sense that:\n//     - it's safe to copy linked_ptr objects concurrently,\n//     - it's safe to copy *from* a linked_ptr and read its underlying\n//       raw pointer (e.g. 
via get()) concurrently, and\n//     - it's safe to write to two linked_ptrs that point to the same\n//       shared object concurrently.\n// TODO(wan@google.com): rename this to safe_linked_ptr to avoid\n// confusion with normal linked_ptr.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_\n\n#include <stdlib.h>\n#include <assert.h>\n\n\nnamespace testing {\nnamespace internal {\n\n// Protects copying of all linked_ptr objects.\nGTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);\n\n// This is used internally by all instances of linked_ptr<>.  It needs to be\n// a non-template class because different types of linked_ptr<> can refer to\n// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).\n// So, it needs to be possible for different types of linked_ptr to participate\n// in the same circular linked list, so we need a single class type here.\n//\n// DO NOT USE THIS CLASS DIRECTLY YOURSELF.  Use linked_ptr<T>.\nclass linked_ptr_internal {\n public:\n  // Create a new circle that includes only this instance.\n  void join_new() {\n    next_ = this;\n  }\n\n  // Many linked_ptr operations may change p.link_ for some linked_ptr\n  // variable p in the same circle as this object.  Therefore we need\n  // to prevent two such operations from occurring concurrently.\n  //\n  // Note that different types of linked_ptr objects can coexist in a\n  // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and\n  // linked_ptr<Derived2>).  Therefore we must use a single mutex to\n  // protect all linked_ptr objects.  
This can create serious\n  // contention in production code, but is acceptable in a testing\n  // framework.\n\n  // Join an existing circle.\n  void join(linked_ptr_internal const* ptr)\n      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {\n    MutexLock lock(&g_linked_ptr_mutex);\n\n    linked_ptr_internal const* p = ptr;\n    while (p->next_ != ptr) {\n      assert(p->next_ != this &&\n             \"Trying to join() a linked ring we are already in. \"\n             \"Is GMock thread safety enabled?\");\n      p = p->next_;\n    }\n    p->next_ = this;\n    next_ = ptr;\n  }\n\n  // Leave whatever circle we're part of.  Returns true if we were the\n  // last member of the circle.  Once this is done, you can join() another.\n  bool depart()\n      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {\n    MutexLock lock(&g_linked_ptr_mutex);\n\n    if (next_ == this) return true;\n    linked_ptr_internal const* p = next_;\n    while (p->next_ != this) {\n      assert(p->next_ != next_ &&\n             \"Trying to depart() a linked ring we are not in. \"\n             \"Is GMock thread safety enabled?\");\n      p = p->next_;\n    }\n    p->next_ = next_;\n    return false;\n  }\n\n private:\n  mutable linked_ptr_internal const* next_;\n};\n\ntemplate <typename T>\nclass linked_ptr {\n public:\n  typedef T element_type;\n\n  // Take over ownership of a raw pointer.  
This should happen as soon as\n  // possible after the object is created.\n  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }\n  ~linked_ptr() { depart(); }\n\n  // Copy an existing linked_ptr<>, adding ourselves to the list of references.\n  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }\n  linked_ptr(linked_ptr const& ptr) {  // NOLINT\n    assert(&ptr != this);\n    copy(&ptr);\n  }\n\n  // Assignment releases the old value and acquires the new.\n  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {\n    depart();\n    copy(&ptr);\n    return *this;\n  }\n\n  linked_ptr& operator=(linked_ptr const& ptr) {\n    if (&ptr != this) {\n      depart();\n      copy(&ptr);\n    }\n    return *this;\n  }\n\n  // Smart pointer members.\n  void reset(T* ptr = NULL) {\n    depart();\n    capture(ptr);\n  }\n  T* get() const { return value_; }\n  T* operator->() const { return value_; }\n  T& operator*() const { return *value_; }\n\n  bool operator==(T* p) const { return value_ == p; }\n  bool operator!=(T* p) const { return value_ != p; }\n  template <typename U>\n  bool operator==(linked_ptr<U> const& ptr) const {\n    return value_ == ptr.get();\n  }\n  template <typename U>\n  bool operator!=(linked_ptr<U> const& ptr) const {\n    return value_ != ptr.get();\n  }\n\n private:\n  template <typename U>\n  friend class linked_ptr;\n\n  T* value_;\n  linked_ptr_internal link_;\n\n  void depart() {\n    if (link_.depart()) delete value_;\n  }\n\n  void capture(T* ptr) {\n    value_ = ptr;\n    link_.join_new();\n  }\n\n  template <typename U> void copy(linked_ptr<U> const* ptr) {\n    value_ = ptr->get();\n    if (value_)\n      link_.join(&ptr->link_);\n    else\n      link_.join_new();\n  }\n};\n\ntemplate<typename T> inline\nbool operator==(T* ptr, const linked_ptr<T>& x) {\n  return ptr == x.get();\n}\n\ntemplate<typename T> inline\nbool operator!=(T* ptr, const linked_ptr<T>& x) {\n  return ptr != x.get();\n}\n\n// A 
function to convert T* into linked_ptr<T>\n// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation\n// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))\ntemplate <typename T>\nlinked_ptr<T> make_linked_ptr(T* ptr) {\n  return linked_ptr<T>(ptr);\n}\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_\n// Copyright 2007, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n// Google Test - The Google C++ Testing Framework\n//\n// This file implements a universal value printer that can print a\n// value of any type T:\n//\n//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);\n//\n// A user can teach this function how to print a class type T by\n// defining either operator<<() or PrintTo() in the namespace that\n// defines T.  More specifically, the FIRST defined function in the\n// following list will be used (assuming T is defined in namespace\n// foo):\n//\n//   1. foo::PrintTo(const T&, ostream*)\n//   2. operator<<(ostream&, const T&) defined in either foo or the\n//      global namespace.\n//\n// If none of the above is defined, it will print the debug string of\n// the value if it is a protocol buffer, or print the raw bytes in the\n// value otherwise.\n//\n// To aid debugging: when T is a reference type, the address of the\n// value is also printed; when T is a (const) char pointer, both the\n// pointer value and the NUL-terminated string it points to are\n// printed.\n//\n// We also provide some convenient wrappers:\n//\n//   // Prints a value to a string.  
For a (const or not) char\n//   // pointer, the NUL-terminated string (but not the pointer) is\n//   // printed.\n//   std::string ::testing::PrintToString(const T& value);\n//\n//   // Prints a value tersely: for a reference type, the referenced\n//   // value (but not the address) is printed; for a (const or not) char\n//   // pointer, the NUL-terminated string (but not the pointer) is\n//   // printed.\n//   void ::testing::internal::UniversalTersePrint(const T& value, ostream*);\n//\n//   // Prints value using the type inferred by the compiler.  The difference\n//   // from UniversalTersePrint() is that this function prints both the\n//   // pointer and the NUL-terminated string for a (const or not) char pointer.\n//   void ::testing::internal::UniversalPrint(const T& value, ostream*);\n//\n//   // Prints the fields of a tuple tersely to a string vector, one\n//   // element for each field. Tuple support must be enabled in\n//   // gtest-port.h.\n//   std::vector<string> UniversalTersePrintTupleFieldsToStrings(\n//       const Tuple& value);\n//\n// Known limitation:\n//\n// The print primitives print the elements of an STL-style container\n// using the compiler-inferred type of *iter where iter is a\n// const_iterator of the container.  When const_iterator is an input\n// iterator but not a forward iterator, this inferred type may not\n// match value_type, and the print output may be incorrect.  In\n// practice, this is rarely a problem as for most containers\n// const_iterator is a forward iterator.  We'll fix this if there's an\n// actual need for it.  
Note that this fix cannot rely on value_type\n// being defined as many user-defined container types don't have\n// value_type.\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_\n#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_\n\n#include <ostream>  // NOLINT\n#include <sstream>\n#include <string>\n#include <utility>\n#include <vector>\n\n#if GTEST_HAS_STD_TUPLE_\n# include <tuple>\n#endif\n\nnamespace testing {\n\n// Definitions in the 'internal' and 'internal2' name spaces are\n// subject to change without notice.  DO NOT USE THEM IN USER CODE!\nnamespace internal2 {\n\n// Prints the given number of bytes in the given object to the given\n// ostream.\nGTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,\n                                     size_t count,\n                                     ::std::ostream* os);\n\n// For selecting which printer to use when a given type has neither <<\n// nor PrintTo().\nenum TypeKind {\n  kProtobuf,              // a protobuf type\n  kConvertibleToInteger,  // a type implicitly convertible to BiggestInt\n                          // (e.g. 
a named or unnamed enum type)\n  kOtherType              // anything else\n};\n\n// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called\n// by the universal printer to print a value of type T when neither\n// operator<< nor PrintTo() is defined for T, where kTypeKind is the\n// \"kind\" of T as defined by enum TypeKind.\ntemplate <typename T, TypeKind kTypeKind>\nclass TypeWithoutFormatter {\n public:\n  // This default version is called when kTypeKind is kOtherType.\n  static void PrintValue(const T& value, ::std::ostream* os) {\n    PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),\n                         sizeof(value), os);\n  }\n};\n\n// We print a protobuf using its ShortDebugString() when the string\n// doesn't exceed this many characters; otherwise we print it using\n// DebugString() for better readability.\nconst size_t kProtobufOneLinerMaxLength = 50;\n\ntemplate <typename T>\nclass TypeWithoutFormatter<T, kProtobuf> {\n public:\n  static void PrintValue(const T& value, ::std::ostream* os) {\n    const ::testing::internal::string short_str = value.ShortDebugString();\n    const ::testing::internal::string pretty_str =\n        short_str.length() <= kProtobufOneLinerMaxLength ?\n        short_str : (\"\\n\" + value.DebugString());\n    *os << (\"<\" + pretty_str + \">\");\n  }\n};\n\ntemplate <typename T>\nclass TypeWithoutFormatter<T, kConvertibleToInteger> {\n public:\n  // Since T has no << operator or PrintTo() but can be implicitly\n  // converted to BiggestInt, we print it as a BiggestInt.\n  //\n  // Most likely T is an enum type (either named or unnamed), in which\n  // case printing it as an integer is the desired behavior.  
In case\n  // T is not an enum, printing it as an integer is the best we can do\n  // given that it has no user-defined printer.\n  static void PrintValue(const T& value, ::std::ostream* os) {\n    const internal::BiggestInt kBigInt = value;\n    *os << kBigInt;\n  }\n};\n\n// Prints the given value to the given ostream.  If the value is a\n// protocol message, its debug string is printed; if it's an enum or\n// of a type implicitly convertible to BiggestInt, it's printed as an\n// integer; otherwise the bytes in the value are printed.  This is\n// what UniversalPrinter<T>::Print() does when it knows nothing about\n// type T and T has neither << operator nor PrintTo().\n//\n// A user can override this behavior for a class type Foo by defining\n// a << operator in the namespace where Foo is defined.\n//\n// We put this operator in namespace 'internal2' instead of 'internal'\n// to simplify the implementation, as much code in 'internal' needs to\n// use << in STL, which would conflict with our own << were it defined\n// in 'internal'.\n//\n// Note that this operator<< takes a generic std::basic_ostream<Char,\n// CharTraits> type instead of the more restricted std::ostream.  If\n// we define it to take an std::ostream instead, we'll get an\n// \"ambiguous overloads\" compiler error when trying to print a type\n// Foo that supports streaming to std::basic_ostream<Char,\n// CharTraits>, as the compiler cannot tell whether\n// operator<<(std::ostream&, const T&) or\n// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more\n// specific.\ntemplate <typename Char, typename CharTraits, typename T>\n::std::basic_ostream<Char, CharTraits>& operator<<(\n    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {\n  TypeWithoutFormatter<T,\n      (internal::IsAProtocolMessage<T>::value ? 
kProtobuf :\n       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?\n       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);\n  return os;\n}\n\n}  // namespace internal2\n}  // namespace testing\n\n// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up\n// magic needed for implementing UniversalPrinter won't work.\nnamespace testing_internal {\n\n// Used to print a value that is not an STL-style container when the\n// user doesn't define PrintTo() for it.\ntemplate <typename T>\nvoid DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {\n  // With the following statement, during unqualified name lookup,\n  // testing::internal2::operator<< appears as if it was declared in\n  // the nearest enclosing namespace that contains both\n  // ::testing_internal and ::testing::internal2, i.e. the global\n  // namespace.  For more details, refer to the C++ Standard section\n  // 7.3.4-1 [namespace.udir].  This allows us to fall back onto\n  // testing::internal2::operator<< in case T doesn't come with a <<\n  // operator.\n  //\n  // We cannot write 'using ::testing::internal2::operator<<;', which\n  // gcc 3.3 fails to compile due to a compiler bug.\n  using namespace ::testing::internal2;  // NOLINT\n\n  // Assuming T is defined in namespace foo, in the next statement,\n  // the compiler will consider all of:\n  //\n  //   1. foo::operator<< (thanks to Koenig look-up),\n  //   2. ::operator<< (as the current namespace is enclosed in ::),\n  //   3. testing::internal2::operator<< (thanks to the using statement above).\n  //\n  // The operator<< whose type matches T best will be picked.\n  //\n  // We deliberately allow #2 to be a candidate, as sometimes it's\n  // impossible to define #1 (e.g. 
when foo is ::std, defining\n  // anything in it is undefined behavior unless you are a compiler\n  // vendor.).\n  *os << value;\n}\n\n}  // namespace testing_internal\n\nnamespace testing {\nnamespace internal {\n\n// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a\n// value of type ToPrint that is an operand of a comparison assertion\n// (e.g. ASSERT_EQ).  OtherOperand is the type of the other operand in\n// the comparison, and is used to help determine the best way to\n// format the value.  In particular, when the value is a C string\n// (char pointer) and the other operand is an STL string object, we\n// want to format the C string as a string, since we know it is\n// compared by value with the string object.  If the value is a char\n// pointer but the other operand is not an STL string object, we don't\n// know whether the pointer is supposed to point to a NUL-terminated\n// string, and thus want to print it as a pointer to be safe.\n//\n// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\n\n// The default case.\ntemplate <typename ToPrint, typename OtherOperand>\nclass FormatForComparison {\n public:\n  static ::std::string Format(const ToPrint& value) {\n    return ::testing::PrintToString(value);\n  }\n};\n\n// Array.\ntemplate <typename ToPrint, size_t N, typename OtherOperand>\nclass FormatForComparison<ToPrint[N], OtherOperand> {\n public:\n  static ::std::string Format(const ToPrint* value) {\n    return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);\n  }\n};\n\n// By default, print C string as pointers to be safe, as we don't know\n// whether they actually point to a NUL-terminated string.\n\n#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType)                \\\n  template <typename OtherOperand>                                      \\\n  class FormatForComparison<CharType*, OtherOperand> {                  \\\n   public:                                                              \\\n    static 
::std::string Format(CharType* value) {                      \\\n      return ::testing::PrintToString(static_cast<const void*>(value)); \\\n    }                                                                   \\\n  }\n\nGTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);\nGTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);\nGTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);\nGTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);\n\n#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_\n\n// If a C string is compared with an STL string object, we know it's meant\n// to point to a NUL-terminated string, and thus can print it as a string.\n\n#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \\\n  template <>                                                           \\\n  class FormatForComparison<CharType*, OtherStringType> {               \\\n   public:                                                              \\\n    static ::std::string Format(CharType* value) {                      \\\n      return ::testing::PrintToString(value);                           \\\n    }                                                                   \\\n  }\n\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);\n\n#if GTEST_HAS_GLOBAL_STRING\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);\n#endif\n\n#if GTEST_HAS_GLOBAL_WSTRING\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);\n#endif\n\n#if GTEST_HAS_STD_WSTRING\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);\nGTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);\n#endif\n\n#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_\n\n// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc)\n// operand to be used in a failure message.  
The type (but not value)\n// of the other operand may affect the format.  This allows us to\n// print a char* as a raw pointer when it is compared against another\n// char* or void*, and print it as a C string when it is compared\n// against an std::string object, for example.\n//\n// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\ntemplate <typename T1, typename T2>\nstd::string FormatForComparisonFailureMessage(\n    const T1& value, const T2& /* other_operand */) {\n  return FormatForComparison<T1, T2>::Format(value);\n}\n\n// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given\n// value to the given ostream.  The caller must ensure that\n// 'ostream_ptr' is not NULL, or the behavior is undefined.\n//\n// We define UniversalPrinter as a class template (as opposed to a\n// function template), as we need to partially specialize it for\n// reference types, which cannot be done with function templates.\ntemplate <typename T>\nclass UniversalPrinter;\n\ntemplate <typename T>\nvoid UniversalPrint(const T& value, ::std::ostream* os);\n\n// Used to print an STL-style container when the user doesn't define\n// a PrintTo() for it.\ntemplate <typename C>\nvoid DefaultPrintTo(IsContainer /* dummy */,\n                    false_type /* is not a pointer */,\n                    const C& container, ::std::ostream* os) {\n  const size_t kMaxCount = 32;  // The maximum number of elements to print.\n  *os << '{';\n  size_t count = 0;\n  for (typename C::const_iterator it = container.begin();\n       it != container.end(); ++it, ++count) {\n    if (count > 0) {\n      *os << ',';\n      if (count == kMaxCount) {  // Enough has been printed.\n        *os << \" ...\";\n        break;\n      }\n    }\n    *os << ' ';\n    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't\n    // handle *it being a native array.\n    internal::UniversalPrint(*it, os);\n  }\n\n  if (count > 0) {\n    *os << ' ';\n  }\n  *os << '}';\n}\n\n// Used to print a pointer that 
is neither a char pointer nor a member\n// pointer, when the user doesn't define PrintTo() for it.  (A member\n// variable pointer or member function pointer doesn't really point to\n// a location in the address space.  Their representation is\n// implementation-defined.  Therefore they will be printed as raw\n// bytes.)\ntemplate <typename T>\nvoid DefaultPrintTo(IsNotContainer /* dummy */,\n                    true_type /* is a pointer */,\n                    T* p, ::std::ostream* os) {\n  if (p == NULL) {\n    *os << \"NULL\";\n  } else {\n    // C++ doesn't allow casting from a function pointer to any object\n    // pointer.\n    //\n    // IsTrue() silences warnings: \"Condition is always true\",\n    // \"unreachable code\".\n    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {\n      // T is not a function type.  We just call << to print p,\n      // relying on ADL to pick up user-defined << for their pointer\n      // types, if any.\n      *os << p;\n    } else {\n      // T is a function type, so '*os << p' doesn't do what we want\n      // (it just prints p as bool).  We want to print p as a const\n      // void*.  However, we cannot cast it to const void* directly,\n      // even using reinterpret_cast, as earlier versions of gcc\n      // (e.g. 3.4.5) cannot compile the cast when p is a function\n      // pointer.  Casting to UInt64 first solves the problem.\n      *os << reinterpret_cast<const void*>(\n          reinterpret_cast<internal::UInt64>(p));\n    }\n  }\n}\n\n// Used to print a non-container, non-pointer value when the user\n// doesn't define PrintTo() for it.\ntemplate <typename T>\nvoid DefaultPrintTo(IsNotContainer /* dummy */,\n                    false_type /* is not a pointer */,\n                    const T& value, ::std::ostream* os) {\n  ::testing_internal::DefaultPrintNonContainerTo(value, os);\n}\n\n// Prints the given value using the << operator if it has one;\n// otherwise prints the bytes in it.  
This is what\n// UniversalPrinter<T>::Print() does when PrintTo() is not specialized\n// or overloaded for type T.\n//\n// A user can override this behavior for a class type Foo by defining\n// an overload of PrintTo() in the namespace where Foo is defined.  We\n// give the user this option as sometimes defining a << operator for\n// Foo is not desirable (e.g. the coding style may prevent doing it,\n// or there is already a << operator but it doesn't do what the user\n// wants).\ntemplate <typename T>\nvoid PrintTo(const T& value, ::std::ostream* os) {\n  // DefaultPrintTo() is overloaded.  The type of its first two\n  // arguments determine which version will be picked.  If T is an\n  // STL-style container, the version for container will be called; if\n  // T is a pointer, the pointer version will be called; otherwise the\n  // generic version will be called.\n  //\n  // Note that we check for container types here, prior to we check\n  // for protocol message types in our operator<<.  The rationale is:\n  //\n  // For protocol messages, we want to give people a chance to\n  // override Google Mock's format by defining a PrintTo() or\n  // operator<<.  
For STL containers, other formats can be\n  // incompatible with Google Mock's format for the container\n  // elements; therefore we check for container types here to ensure\n  // that our format is used.\n  //\n  // The second argument of DefaultPrintTo() is needed to bypass a bug\n  // in Symbian's C++ compiler that prevents it from picking the right\n  // overload between:\n  //\n  //   PrintTo(const T& x, ...);\n  //   PrintTo(T* x, ...);\n  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);\n}\n\n// The following list of PrintTo() overloads tells\n// UniversalPrinter<T>::Print() how to print standard types (built-in\n// types, strings, plain arrays, and pointers).\n\n// Overloads for various char types.\nGTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);\nGTEST_API_ void PrintTo(signed char c, ::std::ostream* os);\ninline void PrintTo(char c, ::std::ostream* os) {\n  // When printing a plain char, we always treat it as unsigned.  This\n  // way, the output won't be affected by whether the compiler thinks\n  // char is signed or not.\n  PrintTo(static_cast<unsigned char>(c), os);\n}\n\n// Overloads for other simple built-in types.\ninline void PrintTo(bool x, ::std::ostream* os) {\n  *os << (x ? \"true\" : \"false\");\n}\n\n// Overload for wchar_t type.\n// Prints a wchar_t as a symbol if it is printable or as its internal\n// code otherwise and also as its decimal code (except for L'\\0').\n// The L'\\0' char is printed as \"L'\\\\0'\". 
The decimal code is printed\n// as signed integer when wchar_t is implemented by the compiler\n// as a signed type and is printed as an unsigned integer when wchar_t\n// is implemented as an unsigned type.\nGTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);\n\n// Overloads for C strings.\nGTEST_API_ void PrintTo(const char* s, ::std::ostream* os);\ninline void PrintTo(char* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const char*>(s), os);\n}\n\n// signed/unsigned char is often used for representing binary data, so\n// we print pointers to it as void* to be safe.\ninline void PrintTo(const signed char* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const void*>(s), os);\n}\ninline void PrintTo(signed char* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const void*>(s), os);\n}\ninline void PrintTo(const unsigned char* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const void*>(s), os);\n}\ninline void PrintTo(unsigned char* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const void*>(s), os);\n}\n\n// MSVC can be configured to define wchar_t as a typedef of unsigned\n// short.  It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native\n// type.  When wchar_t is a typedef, defining an overload for const\n// wchar_t* would cause unsigned short* be printed as a wide string,\n// possibly causing invalid memory accesses.\n#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)\n// Overloads for wide C strings\nGTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);\ninline void PrintTo(wchar_t* s, ::std::ostream* os) {\n  PrintTo(ImplicitCast_<const wchar_t*>(s), os);\n}\n#endif\n\n// Overload for C arrays.  
Multi-dimensional arrays are printed\n// properly.\n\n// Prints the given number of elements in an array, without printing\n// the curly braces.\ntemplate <typename T>\nvoid PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {\n  UniversalPrint(a[0], os);\n  for (size_t i = 1; i != count; i++) {\n    *os << \", \";\n    UniversalPrint(a[i], os);\n  }\n}\n\n// Overloads for ::string and ::std::string.\n#if GTEST_HAS_GLOBAL_STRING\nGTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);\ninline void PrintTo(const ::string& s, ::std::ostream* os) {\n  PrintStringTo(s, os);\n}\n#endif  // GTEST_HAS_GLOBAL_STRING\n\nGTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);\ninline void PrintTo(const ::std::string& s, ::std::ostream* os) {\n  PrintStringTo(s, os);\n}\n\n// Overloads for ::wstring and ::std::wstring.\n#if GTEST_HAS_GLOBAL_WSTRING\nGTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);\ninline void PrintTo(const ::wstring& s, ::std::ostream* os) {\n  PrintWideStringTo(s, os);\n}\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n#if GTEST_HAS_STD_WSTRING\nGTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);\ninline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {\n  PrintWideStringTo(s, os);\n}\n#endif  // GTEST_HAS_STD_WSTRING\n\n#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_\n// Helper function for printing a tuple.  T must be instantiated with\n// a tuple type.\ntemplate <typename T>\nvoid PrintTupleTo(const T& t, ::std::ostream* os);\n#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_\n\n#if GTEST_HAS_TR1_TUPLE\n// Overload for ::std::tr1::tuple.  Needed for printing function arguments,\n// which are packed as tuples.\n\n// Overloaded PrintTo() for tuples of various arities.  We support\n// tuples of up-to 10 fields.  
The following implementation works
// regardless of whether tr1::tuple is implemented using the
// non-standard variadic template feature or not.

inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1>
void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2>
void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8, typename T9>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8, typename T9, typename T10>
void PrintTo(
    const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
    ::std::ostream* os) {
  PrintTupleTo(t, os);
}
#endif  // GTEST_HAS_TR1_TUPLE

#if GTEST_HAS_STD_TUPLE_
// With C++11 std::tuple a single variadic overload replaces the ten
// fixed-arity tr1 overloads above.
template <typename... Types>
void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}
#endif  // GTEST_HAS_STD_TUPLE_

// Overload for std::pair.
template <typename T1, typename T2>
void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
  *os << '(';
  // We cannot use UniversalPrint(value.first, os) here, as T1 may be
  // a reference type.  The same for printing value.second.
  UniversalPrinter<T1>::Print(value.first, os);
  *os << ", ";
  UniversalPrinter<T2>::Print(value.second, os);
  *os << ')';
}

// Implements printing a non-reference type T by letting the compiler
// pick the right overload of PrintTo() for T.
template <typename T>
class UniversalPrinter {
 public:
  // MSVC warns about adding const to a function type, so we want to
  // disable the warning.
  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)

  // Note: we deliberately don't call this PrintTo(), as that name
  // conflicts with ::testing::internal::PrintTo in the body of the
  // function.
  static void Print(const T& value, ::std::ostream* os) {
    // By default, ::testing::internal::PrintTo() is used for printing
    // the value.
    //
    // Thanks to Koenig look-up, if T is a class and has its own
    // PrintTo() function defined in its namespace, that function will
    // be visible here.  Since it is more specific than the generic ones
    // in ::testing::internal, it will be picked by the compiler in the
    // following statement - exactly what we want.
    PrintTo(value, os);
  }

  GTEST_DISABLE_MSC_WARNINGS_POP_()
};

// UniversalPrintArray(begin, len, os) prints an array of 'len'
// elements, starting at address 'begin'.
template <typename T>
void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
  if (len == 0) {
    *os << "{}";
  } else {
    *os << "{ ";
    const size_t kThreshold = 18;
    const size_t kChunkSize = 8;
    // If the array has more than kThreshold elements, we'll have to
    // omit some details by printing only the first and the last
    // kChunkSize elements.
    // TODO(wan@google.com): let the user control the threshold using a flag.
    if (len <= kThreshold) {
      PrintRawArrayTo(begin, len, os);
    } else {
      PrintRawArrayTo(begin, kChunkSize, os);
      *os << ", ..., ";
      PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
    }
    *os << " }";
  }
}
// This overload prints a (const) char array compactly.
GTEST_API_ void UniversalPrintArray(
    const char* begin, size_t len, ::std::ostream* os);

// This overload prints a (const) wchar_t array compactly.
GTEST_API_ void UniversalPrintArray(
    const wchar_t* begin, size_t len, ::std::ostream* os);

// Implements printing an array type T[N].
template <typename T, size_t N>
class UniversalPrinter<T[N]> {
 public:
  // Prints the given array, omitting some elements when there are too
  // many.
  static void Print(const T (&a)[N], ::std::ostream* os) {
    UniversalPrintArray(a, N, os);
  }
};

// Implements printing a reference type T&.
template <typename T>
class UniversalPrinter<T&> {
 public:
  // MSVC warns about adding const to a function type, so we want to
  // disable the warning.
  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)

  static void Print(const T& value, ::std::ostream* os) {
    // Prints the address of the value.  We use reinterpret_cast here
    // as static_cast doesn't compile when T is a function type.
    *os << "@" << reinterpret_cast<const void*>(&value) << " ";

    // Then prints the value itself.
    UniversalPrint(value, os);
  }

  GTEST_DISABLE_MSC_WARNINGS_POP_()
};

// Prints a value tersely: for a reference type, the referenced value
// (but not the address) is printed; for a (const) char pointer, the
// NUL-terminated string (but not the pointer) is printed.

template <typename T>
class UniversalTersePrinter {
 public:
  static void Print(const T& value, ::std::ostream* os) {
    UniversalPrint(value, os);
  }
};
template <typename T>
class UniversalTersePrinter<T&> {
 public:
  static void Print(const T& value, ::std::ostream* os) {
    UniversalPrint(value, os);
  }
};
template <typename T, size_t N>
class UniversalTersePrinter<T[N]> {
 public:
  static void Print(const T (&value)[N], ::std::ostream* os) {
    UniversalPrinter<T[N]>::Print(value, os);
  }
};
template <>
class UniversalTersePrinter<const char*> {
 public:
  static void Print(const char* str, ::std::ostream* os) {
    if (str == NULL) {
      *os << "NULL";
    } else {
      // Print the characters, not the pointer value.
      UniversalPrint(string(str), os);
    }
  }
};
template <>
class UniversalTersePrinter<char*> {
 public:
  static void Print(char* str, ::std::ostream* os) {
    UniversalTersePrinter<const char*>::Print(str, os);
  }
};

#if GTEST_HAS_STD_WSTRING
template <>
class UniversalTersePrinter<const wchar_t*> {
 public:
  static void Print(const wchar_t* str, ::std::ostream* os) {
    if (str == NULL) {
      *os << "NULL";
    } else {
      UniversalPrint(::std::wstring(str), os);
    }
  }
};
#endif

template <>
class UniversalTersePrinter<wchar_t*> {
 public:
  static void Print(wchar_t* str, ::std::ostream* os) {
    UniversalTersePrinter<const wchar_t*>::Print(str, os);
  }
};

template <typename T>
void UniversalTersePrint(const T& value, ::std::ostream* os) {
  UniversalTersePrinter<T>::Print(value, os);
}

// Prints a value using the type inferred by the compiler.  The
// difference between this and UniversalTersePrint() is that for a
// (const) char pointer, this prints both the pointer and the
// NUL-terminated string.
template <typename T>
void UniversalPrint(const T& value, ::std::ostream* os) {
  // A workarond for the bug in VC++ 7.1 that prevents us from instantiating
  // UniversalPrinter with T directly.
  typedef T T1;
  UniversalPrinter<T1>::Print(value, os);
}

typedef ::std::vector<string> Strings;

// TuplePolicy<TupleT> must provide:
// - tuple_size
//     size of tuple TupleT.
// - get<size_t I>(const TupleT& t)
//     static function extracting element I of tuple TupleT.
// - tuple_element<size_t I>::type
//     type of element I of tuple TupleT.
template <typename TupleT>
struct TuplePolicy;

#if GTEST_HAS_TR1_TUPLE
template <typename TupleT>
struct TuplePolicy {
  typedef TupleT Tuple;
  static const size_t tuple_size = ::std::tr1::tuple_size<Tuple>::value;

  template <size_t I>
  struct tuple_element : ::std::tr1::tuple_element<I, Tuple> {};

  template <size_t I>
  static typename AddReference<
      const typename ::std::tr1::tuple_element<I, Tuple>::type>::type get(
      const Tuple& tuple) {
    return ::std::tr1::get<I>(tuple);
  }
};
// Out-of-class definition required for ODR-use of the static member.
template <typename TupleT>
const size_t TuplePolicy<TupleT>::tuple_size;
#endif  // GTEST_HAS_TR1_TUPLE

#if GTEST_HAS_STD_TUPLE_
template <typename... Types>
struct TuplePolicy< ::std::tuple<Types...> > {
  typedef ::std::tuple<Types...> Tuple;
  static const size_t tuple_size = ::std::tuple_size<Tuple>::value;

  template <size_t I>
  struct tuple_element : ::std::tuple_element<I, Tuple> {};

  template <size_t I>
  static const typename ::std::tuple_element<I, Tuple>::type& get(
      const Tuple& tuple) {
    return ::std::get<I>(tuple);
  }
};
template <typename... Types>
const size_t TuplePolicy< ::std::tuple<Types...> >::tuple_size;
#endif  // GTEST_HAS_STD_TUPLE_

#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
// This helper template allows PrintTo() for tuples and
// UniversalTersePrintTupleFieldsToStrings() to be defined by
// induction on the number of tuple fields.  The idea is that
// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
// fields in tuple t, and can be defined in terms of
// TuplePrefixPrinter<N - 1>.
//
// The inductive case.
template <size_t N>
struct TuplePrefixPrinter {
  // Prints the first N fields of a tuple.
  template <typename Tuple>
  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
    GTEST_INTENTIONAL_CONST_COND_PUSH_()
    if (N > 1) {
    GTEST_INTENTIONAL_CONST_COND_POP_()
      *os << ", ";
    }
    UniversalPrinter<
        typename TuplePolicy<Tuple>::template tuple_element<N - 1>::type>
        ::Print(TuplePolicy<Tuple>::template get<N - 1>(t), os);
  }

  // Tersely prints the first N fields of a tuple to a string vector,
  // one element for each field.
  template <typename Tuple>
  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
    ::std::stringstream ss;
    UniversalTersePrint(TuplePolicy<Tuple>::template get<N - 1>(t), &ss);
    strings->push_back(ss.str());
  }
};

// Base case.
template <>
struct TuplePrefixPrinter<0> {
  template <typename Tuple>
  static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}

  template <typename Tuple>
  static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
};

// Helper function for printing a tuple.
// Tuple must be either std::tr1::tuple or std::tuple type.
template <typename Tuple>
void PrintTupleTo(const Tuple& t, ::std::ostream* os) {
  *os << "(";
  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::PrintPrefixTo(t, os);
  *os << ")";
}

// Prints the fields of a tuple tersely to a string vector, one
// element for each field.  See the comment before
// UniversalTersePrint() for how we define "tersely".
template <typename Tuple>
Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
  Strings result;
  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::
      TersePrintPrefixToStrings(value, &result);
  return result;
}
#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_

}  // namespace internal

// Converts any printable value to its terse string representation.
// This is the public entry point used by parameterized test name
// generators and user code.
template <typename T>
::std::string PrintToString(const T& value) {
  ::std::stringstream ss;
  internal::UniversalTersePrinter<T>::Print(value, &ss);
  return ss.str();
}

}  // namespace testing

// Include any custom printer added by the local installation.
// We must include this header at the end to make sure it can use the
// declarations from this file.
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google
Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// This file provides an injection point for custom printers in a local\n// installation of gTest.\n// It will be included from gtest-printers.h and the overrides in this file\n// will be visible to everyone.\n// See documentation at gtest/gtest-printers.h for details on how to define a\n// custom printer.\n//\n// ** Custom implementation starts here **\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_\n\n#if GTEST_HAS_PARAM_TEST\n\nnamespace testing {\n\n// Input to a parameterized test name generator, describing a test parameter.\n// Consists of the parameter value and the integer parameter index.\ntemplate <class ParamType>\nstruct TestParamInfo {\n  TestParamInfo(const ParamType& a_param, size_t an_index) :\n    param(a_param),\n    index(an_index) {}\n  ParamType param;\n  size_t index;\n};\n\n// A builtin 
parameterized test name generator which returns the result of
// testing::PrintToString.
struct PrintToStringParamName {
  template <class ParamType>
  std::string operator()(const TestParamInfo<ParamType>& info) const {
    return PrintToString(info.param);
  }
};

namespace internal {

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Outputs a message explaining invalid registration of different
// fixture class for the same test case. This may happen when
// TEST_P macro is used to define two tests with the same name
// but in different namespaces.
GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
                                          CodeLocation code_location);

// Forward declarations so ParamIteratorInterface can mention them.
template <typename> class ParamGeneratorInterface;
template <typename> class ParamGenerator;

// Interface for iterating over elements provided by an implementation
// of ParamGeneratorInterface<T>.
template <typename T>
class ParamIteratorInterface {
 public:
  virtual ~ParamIteratorInterface() {}
  // A pointer to the base generator instance.
  // Used only for the purposes of iterator comparison
  // to make sure that two iterators belong to the same generator.
  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
  // Advances iterator to point to the next element
  // provided by the generator. The caller is responsible
  // for not calling Advance() on an iterator equal to
  // BaseGenerator()->End().
  virtual void Advance() = 0;
  // Clones the iterator object. Used for implementing copy semantics
  // of ParamIterator<T>.
  virtual ParamIteratorInterface* Clone() const = 0;
  // Dereferences the current iterator and provides (read-only) access
  // to the pointed value.  It is the caller's responsibility not to call
  // Current() on an iterator equal to BaseGenerator()->End().
  // Used for implementing ParamGenerator<T>::operator*().
  virtual const T* Current() const = 0;
  // Determines whether the given iterator and other point to the same
  // element in the sequence generated by the generator.
  // Used for implementing ParamGenerator<T>::operator==().
  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
};

// Class iterating over elements provided by an implementation of
// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
// and implements the const forward iterator concept.
template <typename T>
class ParamIterator {
 public:
  typedef T value_type;
  typedef const T& reference;
  typedef ptrdiff_t difference_type;

  // ParamIterator assumes ownership of the impl_ pointer.
  ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
  ParamIterator& operator=(const ParamIterator& other) {
    if (this != &other)
      impl_.reset(other.impl_->Clone());
    return *this;
  }

  const T& operator*() const { return *impl_->Current(); }
  const T* operator->() const { return impl_->Current(); }
  // Prefix version of operator++.
  ParamIterator& operator++() {
    impl_->Advance();
    return *this;
  }
  // Postfix version of operator++.
  // Clones the current state before advancing so the pre-increment
  // value can be returned.
  ParamIterator operator++(int /*unused*/) {
    ParamIteratorInterface<T>* clone = impl_->Clone();
    impl_->Advance();
    return ParamIterator(clone);
  }
  bool operator==(const ParamIterator& other) const {
    return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
  }
  bool operator!=(const ParamIterator& other) const {
    return !(*this == other);
  }

 private:
  friend class ParamGenerator<T>;
  explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
  scoped_ptr<ParamIteratorInterface<T> > impl_;
};

// ParamGeneratorInterface<T> is the binary
interface to access generators\n// defined in other translation units.\ntemplate <typename T>\nclass ParamGeneratorInterface {\n public:\n  typedef T ParamType;\n\n  virtual ~ParamGeneratorInterface() {}\n\n  // Generator interface definition\n  virtual ParamIteratorInterface<T>* Begin() const = 0;\n  virtual ParamIteratorInterface<T>* End() const = 0;\n};\n\n// Wraps ParamGeneratorInterface<T> and provides general generator syntax\n// compatible with the STL Container concept.\n// This class implements copy initialization semantics and the contained\n// ParamGeneratorInterface<T> instance is shared among all copies\n// of the original object. This is possible because that instance is immutable.\ntemplate<typename T>\nclass ParamGenerator {\n public:\n  typedef ParamIterator<T> iterator;\n\n  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}\n  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}\n\n  ParamGenerator& operator=(const ParamGenerator& other) {\n    impl_ = other.impl_;\n    return *this;\n  }\n\n  iterator begin() const { return iterator(impl_->Begin()); }\n  iterator end() const { return iterator(impl_->End()); }\n\n private:\n  linked_ptr<const ParamGeneratorInterface<T> > impl_;\n};\n\n// Generates values from a range of two comparable values. 
Can be used to
// generate sequences of user-defined types that implement operator+() and
// operator<().
// This class is used in the Range() function.
template <typename T, typename IncrementT>
class RangeGenerator : public ParamGeneratorInterface<T> {
 public:
  RangeGenerator(T begin, T end, IncrementT step)
      : begin_(begin), end_(end),
        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
  virtual ~RangeGenerator() {}

  virtual ParamIteratorInterface<T>* Begin() const {
    return new Iterator(this, begin_, 0, step_);
  }
  virtual ParamIteratorInterface<T>* End() const {
    return new Iterator(this, end_, end_index_, step_);
  }

 private:
  class Iterator : public ParamIteratorInterface<T> {
   public:
    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
             IncrementT step)
        : base_(base), value_(value), index_(index), step_(step) {}
    virtual ~Iterator() {}

    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
      return base_;
    }
    virtual void Advance() {
      value_ = static_cast<T>(value_ + step_);
      index_++;
    }
    virtual ParamIteratorInterface<T>* Clone() const {
      return new Iterator(*this);
    }
    virtual const T* Current() const { return &value_; }
    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
      // Having the same base generator guarantees that the other
      // iterator is of the same type and we can downcast.
      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
          << "The program attempted to compare iterators "
          << "from different generators." << std::endl;
      // Iterators are compared by index, not value, so T only needs
      // operator+ and operator<.
      const int other_index =
          CheckedDowncastToActualType<const Iterator>(&other)->index_;
      return index_ == other_index;
    }

   private:
    Iterator(const Iterator& other)
        : ParamIteratorInterface<T>(),
          base_(other.base_), value_(other.value_), index_(other.index_),
          step_(other.step_) {}

    // No implementation - assignment is unsupported.
    void operator=(const Iterator& other);

    const ParamGeneratorInterface<T>* const base_;
    T value_;
    int index_;
    const IncrementT step_;
  };  // class RangeGenerator::Iterator

  // Counts how many steps separate begin from end; the result is the
  // 0-based index of the end() iterator.
  static int CalculateEndIndex(const T& begin,
                               const T& end,
                               const IncrementT& step) {
    int end_index = 0;
    for (T i = begin; i < end; i = static_cast<T>(i + step))
      end_index++;
    return end_index;
  }

  // No implementation - assignment is unsupported.
  void operator=(const RangeGenerator& other);

  const T begin_;
  const T end_;
  const IncrementT step_;
  // The index for the end() iterator. All the elements in the generated
  // sequence are indexed (0-based) to aid iterator comparison.
  const int end_index_;
};  // class RangeGenerator


// Generates values from a pair of STL-style iterators. Used in the
// ValuesIn() function.
The elements are copied from the source range
// since the source can be located on the stack, and the generator
// is likely to persist beyond that stack frame.
template <typename T>
class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
 public:
  template <typename ForwardIterator>
  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
      : container_(begin, end) {}
  virtual ~ValuesInIteratorRangeGenerator() {}

  virtual ParamIteratorInterface<T>* Begin() const {
    return new Iterator(this, container_.begin());
  }
  virtual ParamIteratorInterface<T>* End() const {
    return new Iterator(this, container_.end());
  }

 private:
  typedef typename ::std::vector<T> ContainerType;

  class Iterator : public ParamIteratorInterface<T> {
   public:
    Iterator(const ParamGeneratorInterface<T>* base,
             typename ContainerType::const_iterator iterator)
        : base_(base), iterator_(iterator) {}
    virtual ~Iterator() {}

    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
      return base_;
    }
    virtual void Advance() {
      ++iterator_;
      value_.reset();
    }
    virtual ParamIteratorInterface<T>* Clone() const {
      return new Iterator(*this);
    }
    // We need to use cached value referenced by iterator_ because *iterator_
    // can return a temporary object (and of type other then T), so just
    // having "return &*iterator_;" doesn't work.
    // value_ is updated here and not in Advance() because Advance()
    // can advance iterator_ beyond the end of the range, and we cannot
    // detect that fact.  The client code, on the other hand, is
    // responsible for not calling Current() on an out-of-range iterator.
    virtual const T* Current() const {
      if (value_.get() == NULL)
        value_.reset(new T(*iterator_));
      return value_.get();
    }
    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
      // Having the same base generator guarantees that the other
      // iterator is of the same type and we can downcast.
      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
          << "The program attempted to compare iterators "
          << "from different generators." << std::endl;
      return iterator_ ==
          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
    }

   private:
    Iterator(const Iterator& other)
          // The explicit constructor call suppresses a false warning
          // emitted by gcc when supplied with the -Wextra option.
        : ParamIteratorInterface<T>(),
          base_(other.base_),
          iterator_(other.iterator_) {}

    const ParamGeneratorInterface<T>* const base_;
    typename ContainerType::const_iterator iterator_;
    // A cached value of *iterator_.  We keep it here to allow access by
    // pointer in the wrapping iterator's operator->().
    // value_ needs to be mutable to be accessed in Current().
    // Use of scoped_ptr helps manage cached value's lifetime,
    // which is bound by the lifespan of the iterator itself.
    mutable scoped_ptr<const T> value_;
  };  // class ValuesInIteratorRangeGenerator::Iterator

  // No implementation - assignment is unsupported.
  void operator=(const ValuesInIteratorRangeGenerator& other);

  const ContainerType container_;
};  // class ValuesInIteratorRangeGenerator

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Default parameterized test name generator, returns a string containing the
// integer test parameter index.
template <class ParamType>
std::string DefaultParamName(const TestParamInfo<ParamType>& info) {
  Message name_stream;
  name_stream << info.index;
  return name_stream.GetString();
}

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Parameterized test name overload helpers, which help the
// INSTANTIATE_TEST_CASE_P macro choose between the default parameterized
// test name generator and user param name generator.
// This overload is selected when the user supplies a name generator.
template <class ParamType, class ParamNameGenFunctor>
ParamNameGenFunctor GetParamNameGen(ParamNameGenFunctor func) {
  return func;
}

template <class ParamType>
struct ParamNameGenFunc {
  typedef std::string Type(const TestParamInfo<ParamType>&);
};

// This overload is selected when no user generator is supplied; it
// falls back to DefaultParamName.
template <class ParamType>
typename ParamNameGenFunc<ParamType>::Type *GetParamNameGen() {
  return DefaultParamName;
}

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Stores a parameter value and later creates tests parameterized with that
// value.
template <class TestClass>
class ParameterizedTestFactory : public TestFactoryBase {
 public:
  typedef typename TestClass::ParamType ParamType;
  explicit ParameterizedTestFactory(ParamType parameter) :
      parameter_(parameter) {}
  // Publishes the stored parameter to TestClass, then instantiates it.
  virtual Test* CreateTest() {
    TestClass::SetParam(&parameter_);
    return new TestClass();
  }

 private:
  const ParamType parameter_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
};

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactoryBase is a base class for meta-factories that create
// test factories for passing into MakeAndRegisterTestInfo function.
template <class ParamType>
class TestMetaFactoryBase {
 public:
  virtual ~TestMetaFactoryBase() {}

  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
};

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactory creates test factories for passing into
// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
// ownership of test factory pointer, same factory object cannot be passed
// into that method twice. But ParameterizedTestCaseInfo is going to call
// it for each Test/Parameter value combination. Thus it needs meta factory
// creator class.
template <class TestCase>
class TestMetaFactory
    : public TestMetaFactoryBase<typename TestCase::ParamType> {
 public:
  typedef typename TestCase::ParamType ParamType;

  TestMetaFactory() {}

  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
    return new ParameterizedTestFactory<TestCase>(parameter);
  }

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
};

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseInfoBase is a generic interface
// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
// accumulates test information provided by TEST_P macro invocations
// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
// and uses that information to register all resulting test instances
// in RegisterTests method.
// The ParameterizedTestCaseRegistry class holds
// a collection of pointers to the ParameterizedTestCaseInfo objects
// and calls RegisterTests() on each of them when asked.
class ParameterizedTestCaseInfoBase {
 public:
  virtual ~ParameterizedTestCaseInfoBase() {}

  // Base part of test case name for display purposes.
  virtual const string& GetTestCaseName() const = 0;
  // Test case id to verify identity.
  virtual TypeId GetTestCaseTypeId() const = 0;
  // UnitTest class invokes this method to register tests in this
  // test case right before running them in RUN_ALL_TESTS macro.
  // This method should not be called more than once on any single
  // instance of a ParameterizedTestCaseInfoBase derived class.
  virtual void RegisterTests() = 0;

 protected:
  ParameterizedTestCaseInfoBase() {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
};

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
// macro invocations for a particular test case and generators
// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
// test case.
It registers tests with all values generated by all\n// generators when asked.\ntemplate <class TestCase>\nclass ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {\n public:\n  // ParamType and GeneratorCreationFunc are private types but are required\n  // for declarations of public methods AddTestPattern() and\n  // AddTestCaseInstantiation().\n  typedef typename TestCase::ParamType ParamType;\n  // A function that returns an instance of appropriate generator type.\n  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();\n  typedef typename ParamNameGenFunc<ParamType>::Type ParamNameGeneratorFunc;\n\n  explicit ParameterizedTestCaseInfo(\n      const char* name, CodeLocation code_location)\n      : test_case_name_(name), code_location_(code_location) {}\n\n  // Test case base name for display purposes.\n  virtual const string& GetTestCaseName() const { return test_case_name_; }\n  // Test case id to verify identity.\n  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }\n  // TEST_P macro uses AddTestPattern() to record information\n  // about a single test in a LocalTestInfo structure.\n  // test_case_name is the base name of the test case (without invocation\n  // prefix). test_base_name is the name of an individual test without\n  // parameter index. 
For the test SequenceA/FooTest.DoBar/1 FooTest is\n  // test case base name and DoBar is test base name.\n  void AddTestPattern(const char* test_case_name,\n                      const char* test_base_name,\n                      TestMetaFactoryBase<ParamType>* meta_factory) {\n    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,\n                                                       test_base_name,\n                                                       meta_factory)));\n  }\n  // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information\n  // about a generator.\n  int AddTestCaseInstantiation(const string& instantiation_name,\n                               GeneratorCreationFunc* func,\n                               ParamNameGeneratorFunc* name_func,\n                               const char* file,\n                               int line) {\n    instantiations_.push_back(\n        InstantiationInfo(instantiation_name, func, name_func, file, line));\n    return 0;  // Return value used only to run this method in namespace scope.\n  }\n  // UnitTest class invokes this method to register tests in this test case\n  // test cases right before running tests in RUN_ALL_TESTS macro.\n  // This method should not be called more then once on any single\n  // instance of a ParameterizedTestCaseInfoBase derived class.\n  // UnitTest has a guard to prevent from calling this method more then once.\n  virtual void RegisterTests() {\n    for (typename TestInfoContainer::iterator test_it = tests_.begin();\n         test_it != tests_.end(); ++test_it) {\n      linked_ptr<TestInfo> test_info = *test_it;\n      for (typename InstantiationContainer::iterator gen_it =\n               instantiations_.begin(); gen_it != instantiations_.end();\n               ++gen_it) {\n        const string& instantiation_name = gen_it->name;\n        ParamGenerator<ParamType> generator((*gen_it->generator)());\n        ParamNameGeneratorFunc* name_func = 
gen_it->name_func;\n        const char* file = gen_it->file;\n        int line = gen_it->line;\n\n        string test_case_name;\n        if ( !instantiation_name.empty() )\n          test_case_name = instantiation_name + \"/\";\n        test_case_name += test_info->test_case_base_name;\n\n        size_t i = 0;\n        std::set<std::string> test_param_names;\n        for (typename ParamGenerator<ParamType>::iterator param_it =\n                 generator.begin();\n             param_it != generator.end(); ++param_it, ++i) {\n          Message test_name_stream;\n\n          std::string param_name = name_func(\n              TestParamInfo<ParamType>(*param_it, i));\n\n          GTEST_CHECK_(IsValidParamName(param_name))\n              << \"Parameterized test name '\" << param_name\n              << \"' is invalid, in \" << file\n              << \" line \" << line << std::endl;\n\n          GTEST_CHECK_(test_param_names.count(param_name) == 0)\n              << \"Duplicate parameterized test name '\" << param_name\n              << \"', in \" << file << \" line \" << line << std::endl;\n\n          test_param_names.insert(param_name);\n\n          test_name_stream << test_info->test_base_name << \"/\" << param_name;\n          MakeAndRegisterTestInfo(\n              test_case_name.c_str(),\n              test_name_stream.GetString().c_str(),\n              NULL,  // No type parameter.\n              PrintToString(*param_it).c_str(),\n              code_location_,\n              GetTestCaseTypeId(),\n              TestCase::SetUpTestCase,\n              TestCase::TearDownTestCase,\n              test_info->test_meta_factory->CreateTestFactory(*param_it));\n        }  // for param_it\n      }  // for gen_it\n    }  // for test_it\n  }  // RegisterTests\n\n private:\n  // LocalTestInfo structure keeps information about a single test registered\n  // with TEST_P macro.\n  struct TestInfo {\n    TestInfo(const char* a_test_case_base_name,\n             const char* 
a_test_base_name,\n             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :\n        test_case_base_name(a_test_case_base_name),\n        test_base_name(a_test_base_name),\n        test_meta_factory(a_test_meta_factory) {}\n\n    const string test_case_base_name;\n    const string test_base_name;\n    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;\n  };\n  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;\n  // Records data received from INSTANTIATE_TEST_CASE_P macros:\n  //  <Instantiation name, Sequence generator creation function,\n  //     Name generator function, Source file, Source line>\n  struct InstantiationInfo {\n      InstantiationInfo(const std::string &name_in,\n                        GeneratorCreationFunc* generator_in,\n                        ParamNameGeneratorFunc* name_func_in,\n                        const char* file_in,\n                        int line_in)\n          : name(name_in),\n            generator(generator_in),\n            name_func(name_func_in),\n            file(file_in),\n            line(line_in) {}\n\n      std::string name;\n      GeneratorCreationFunc* generator;\n      ParamNameGeneratorFunc* name_func;\n      const char* file;\n      int line;\n  };\n  typedef ::std::vector<InstantiationInfo> InstantiationContainer;\n\n  static bool IsValidParamName(const std::string& name) {\n    // Check for empty string\n    if (name.empty())\n      return false;\n\n    // Check for invalid characters\n    for (std::string::size_type index = 0; index < name.size(); ++index) {\n      if (!isalnum(name[index]) && name[index] != '_')\n        return false;\n    }\n\n    return true;\n  }\n\n  const string test_case_name_;\n  CodeLocation code_location_;\n  TestInfoContainer tests_;\n  InstantiationContainer instantiations_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);\n};  // class ParameterizedTestCaseInfo\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// 
// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
// macros use it to locate their corresponding ParameterizedTestCaseInfo
// descriptors.
class ParameterizedTestCaseRegistry {
 public:
  ParameterizedTestCaseRegistry() {}
  // Owns the registered ParameterizedTestCaseInfoBase objects; deletes
  // them all on destruction.
  ~ParameterizedTestCaseRegistry() {
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      delete *it;
    }
  }

  // Looks up or creates and returns a structure containing information about
  // tests and instantiations of a particular test case.
  template <class TestCase>
  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
      const char* test_case_name,
      CodeLocation code_location) {
    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      if ((*it)->GetTestCaseName() == test_case_name) {
        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
          // Complain about incorrect usage of Google Test facilities
          // and terminate the program since we cannot guarantee correct
          // test case setup and tear-down in this case.
          ReportInvalidTestCaseType(test_case_name, code_location);
          posix::Abort();
        } else {
          // At this point we are sure that the object we found is of the same
          // type we are looking for, so we downcast it to that type
          // without further checks.
          typed_test_info = CheckedDowncastToActualType<
              ParameterizedTestCaseInfo<TestCase> >(*it);
        }
        break;
      }
    }
    if (typed_test_info == NULL) {
      // No existing entry for this name: create one and take ownership.
      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(
          test_case_name, code_location);
      test_case_infos_.push_back(typed_test_info);
    }
    return typed_test_info;
  }
  // Registers all accumulated tests; invoked once by UnitTest before
  // running them.
  void RegisterTests() {
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      (*it)->RegisterTests();
    }
  }

 private:
  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;

  TestCaseInfoContainer test_case_infos_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
};

}  // namespace internal
}  // namespace testing

#endif  //  GTEST_HAS_PARAM_TEST

#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
// This file was GENERATED by command:
//     pump.py gtest-param-util-generated.h.pump
// DO NOT EDIT BY HAND!!!

// Copyright 2008 Google Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: vladl@google.com (Vlad Losev)\n\n// Type and function utilities for implementing parameterized tests.\n// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!\n//\n// Currently Google Test supports at most 50 arguments in Values,\n// and at most 10 arguments in Combine. Please contact\n// googletestframework@googlegroups.com if you need more.\n// Please note that the number of arguments to Combine is limited\n// by the maximum arity of the implementation of tuple which is\n// currently set at 10.\n\n#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_\n#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_\n\n// scripts/fuse_gtest.py depends on gtest's own header being #included\n// *unconditionally*.  
Therefore these #includes cannot be moved\n// inside #if GTEST_HAS_PARAM_TEST.\n\n#if GTEST_HAS_PARAM_TEST\n\nnamespace testing {\n\n// Forward declarations of ValuesIn(), which is implemented in\n// include/gtest/gtest-param-test.h.\ntemplate <typename ForwardIterator>\ninternal::ParamGenerator<\n  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>\nValuesIn(ForwardIterator begin, ForwardIterator end);\n\ntemplate <typename T, size_t N>\ninternal::ParamGenerator<T> ValuesIn(const T (&array)[N]);\n\ntemplate <class Container>\ninternal::ParamGenerator<typename Container::value_type> ValuesIn(\n    const Container& container);\n\nnamespace internal {\n\n// Used in the Values() function to provide polymorphic capabilities.\ntemplate <typename T1>\nclass ValueArray1 {\n public:\n  explicit ValueArray1(T1 v1) : v1_(v1) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray1& other);\n\n  const T1 v1_;\n};\n\ntemplate <typename T1, typename T2>\nclass ValueArray2 {\n public:\n  ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray2& other);\n\n  const T1 v1_;\n  const T2 v2_;\n};\n\ntemplate <typename T1, typename T2, typename T3>\nclass ValueArray3 {\n public:\n  ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void 
operator=(const ValueArray3& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4>\nclass ValueArray4 {\n public:\n  ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray4& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\nclass ValueArray5 {\n public:\n  ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray5& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6>\nclass ValueArray6 {\n public:\n  ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray6& other);\n\n  const T1 v1_;\n  const 
T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7>\nclass ValueArray7 {\n public:\n  ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray7& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8>\nclass ValueArray8 {\n public:\n  ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n      T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray8& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9>\nclass ValueArray9 {\n public:\n  ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 
v8,\n      T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray9& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10>\nclass ValueArray10 {\n public:\n  ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray10& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11>\nclass ValueArray11 {\n public:\n  ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, 
T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),\n      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray11& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12>\nclass ValueArray12 {\n public:\n  ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),\n      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray12& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 
v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13>\nclass ValueArray13 {\n public:\n  ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),\n      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),\n      v12_(v12), v13_(v13) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray13& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14>\nclass ValueArray14 {\n public:\n  ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), 
static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray14& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15>\nclass ValueArray15 {\n public:\n  ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray15& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 
v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16>\nclass ValueArray16 {\n public:\n  ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),\n      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),\n      v16_(v16) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray16& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17>\nclass ValueArray17 {\n public:\n  ValueArray17(T1 v1, T2 
v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,\n      T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray17& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18>\nclass ValueArray18 {\n public:\n  ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}\n\n  template <typename T>\n  operator 
ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray18& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19>\nclass ValueArray19 {\n public:\n  ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),\n      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),\n      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), 
static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray19& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20>\nclass ValueArray20 {\n public:\n  ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),\n      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),\n      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),\n      v19_(v19), v20_(v20) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n     
   static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray20& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21>\nclass ValueArray21 {\n public:\n  ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),\n      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),\n      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),\n      v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_)};\n    return 
ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray21& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22>\nclass ValueArray22 {\n public:\n  ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_)};\n    return 
ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray22& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23>\nclass ValueArray23 {\n public:\n  ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        
static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray23& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24>\nclass ValueArray24 {\n public:\n  ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),\n      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),\n      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),\n      v22_(v22), v23_(v23), v24_(v24) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), 
static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray24& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25>\nclass ValueArray25 {\n public:\n  ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,\n      T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), 
static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray25& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26>\nclass ValueArray26 {\n public:\n  ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), 
v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray26& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27>\nclass ValueArray27 {\n public:\n  ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, 
T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),\n      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),\n      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),\n      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),\n      v26_(v26), v27_(v27) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray27& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, 
typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28>\nclass ValueArray28 {\n public:\n  ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),\n      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),\n      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),\n      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),\n      v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray28& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  
const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29>\nclass ValueArray29 {\n public:\n  ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),\n      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),\n      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),\n      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),\n      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        
static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray29& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30>\nclass ValueArray30 {\n public:\n  ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      
v29_(v29), v30_(v30) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray30& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, 
typename T30,\n    typename T31>\nclass ValueArray31 {\n public:\n  ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      v29_(v29), v30_(v30), v31_(v31) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray31& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  
const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32>\nclass ValueArray32 {\n public:\n  ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),\n      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),\n      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),\n      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),\n      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), 
static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray32& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33>\nclass ValueArray33 {\n public:\n  ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,\n      T33 v33) : 
v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray33& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const 
T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34>\nclass ValueArray34 {\n public:\n  ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33), v34_(v34) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        
static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray34& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35>\nclass ValueArray35 {\n public:\n  ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), 
v6_(v6),\n      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),\n      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),\n      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),\n      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),\n      v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray35& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  
const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36>\nclass ValueArray36 {\n public:\n  ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),\n      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),\n      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),\n      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),\n      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),\n      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), 
static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray36& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37>\nclass ValueArray37 {\n public:\n  ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, 
T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),\n      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),\n      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),\n      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),\n      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),\n      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),\n      v36_(v36), v37_(v37) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray37& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n 
 const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38>\nclass ValueArray38 {\n public:\n  ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),\n      v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T 
array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray38& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, 
typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39>\nclass ValueArray39 {\n public:\n  ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),\n      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        
static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray39& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40>\nclass ValueArray40 {\n public:\n  ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 
v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),\n      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),\n      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),\n      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),\n      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),\n      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),\n      v40_(v40) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray40& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 
v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41>\nclass ValueArray41 {\n public:\n  ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,\n      T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), 
v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),\n      v39_(v39), v40_(v40), v41_(v41) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray41& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 
v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42>\nclass ValueArray42 {\n public:\n  ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),\n      v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        
static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray42& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename 
T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43>\nclass ValueArray43 {\n public:\n  ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),\n      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),\n      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),\n      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),\n      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),\n      v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),\n      v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), 
static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray43& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, 
typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44>\nclass ValueArray44 {\n public:\n  ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),\n      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),\n      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),\n      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),\n      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),\n      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),\n      v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),\n      v43_(v43), v44_(v44) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), 
static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray44& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, 
typename T43, typename T44, typename T45>\nclass ValueArray45 {\n public:\n  ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),\n      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),\n      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),\n      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),\n      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),\n      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),\n      v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),\n      v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n      
  static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray45& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46>\nclass ValueArray46 {\n public:\n  ValueArray46(T1 v1, T2 v2, T3 
v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),\n      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),\n      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),\n      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        
static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_), static_cast<T>(v46_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray46& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n  const T46 v46_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47>\nclass ValueArray47 {\n public:\n  ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, 
T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),\n      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),\n      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),\n      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),\n      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),\n      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),\n      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),\n      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),\n      v47_(v47) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        
static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray47& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n  const T46 v46_;\n  const T47 v47_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48>\nclass ValueArray48 {\n public:\n  
ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),\n      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),\n      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),\n      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),\n      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),\n      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),\n      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),\n      v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),\n      v46_(v46), v47_(v47), v48_(v48) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        
static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),\n        static_cast<T>(v48_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray48& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n  const T46 v46_;\n  const T47 v47_;\n  const T48 v48_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename 
T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49>\nclass ValueArray49 {\n public:\n  ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,\n      T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),\n      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),\n      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n   
     static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),\n        static_cast<T>(v48_), static_cast<T>(v49_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray49& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n  const T46 v46_;\n  const T47 v47_;\n  const T48 v48_;\n  const T49 v49_;\n};\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, 
typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49, typename T50>\nclass ValueArray50 {\n public:\n  ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,\n      T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),\n      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),\n      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),\n      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),\n      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),\n      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),\n      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),\n      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}\n\n  template <typename T>\n  operator ParamGenerator<T>() const {\n    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),\n        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),\n        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),\n        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),\n        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),\n        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),\n        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),\n  
      static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),\n        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),\n        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),\n        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),\n        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),\n        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),\n        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),\n        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),\n        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),\n        static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};\n    return ValuesIn(array);\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const ValueArray50& other);\n\n  const T1 v1_;\n  const T2 v2_;\n  const T3 v3_;\n  const T4 v4_;\n  const T5 v5_;\n  const T6 v6_;\n  const T7 v7_;\n  const T8 v8_;\n  const T9 v9_;\n  const T10 v10_;\n  const T11 v11_;\n  const T12 v12_;\n  const T13 v13_;\n  const T14 v14_;\n  const T15 v15_;\n  const T16 v16_;\n  const T17 v17_;\n  const T18 v18_;\n  const T19 v19_;\n  const T20 v20_;\n  const T21 v21_;\n  const T22 v22_;\n  const T23 v23_;\n  const T24 v24_;\n  const T25 v25_;\n  const T26 v26_;\n  const T27 v27_;\n  const T28 v28_;\n  const T29 v29_;\n  const T30 v30_;\n  const T31 v31_;\n  const T32 v32_;\n  const T33 v33_;\n  const T34 v34_;\n  const T35 v35_;\n  const T36 v36_;\n  const T37 v37_;\n  const T38 v38_;\n  const T39 v39_;\n  const T40 v40_;\n  const T41 v41_;\n  const T42 v42_;\n  const T43 v43_;\n  const T44 v44_;\n  const T45 v45_;\n  const T46 v46_;\n  const T47 v47_;\n  const T48 v48_;\n  const T49 v49_;\n  const T50 v50_;\n};\n\n# if GTEST_HAS_COMBINE\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Generates values from the Cartesian product of values produced\n// by the 
argument generators.\n//\ntemplate <typename T1, typename T2>\nclass CartesianProductGenerator2\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2> > {\n public:\n  typedef ::testing::tuple<T1, T2> ParamType;\n\n  CartesianProductGenerator2(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2)\n      : g1_(g1), g2_(g2) {}\n  virtual ~CartesianProductGenerator2() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current2_;\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator 
guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator 
end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator2::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator2& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n};  // class CartesianProductGenerator2\n\n\ntemplate <typename T1, typename T2, typename T3>\nclass CartesianProductGenerator3\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3> ParamType;\n\n  CartesianProductGenerator3(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)\n      : g1_(g1), g2_(g2), g3_(g3) {}\n  virtual ~CartesianProductGenerator3() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be 
called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current3_;\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. 
That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    ParamType current_value_;\n  };  // class 
CartesianProductGenerator3::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator3& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n};  // class CartesianProductGenerator3\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4>\nclass CartesianProductGenerator4\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4> ParamType;\n\n  CartesianProductGenerator4(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}\n  virtual ~CartesianProductGenerator4() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4)    
{\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current4_;\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. 
That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const 
typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator4::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator4& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n};  // class CartesianProductGenerator4\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\nclass CartesianProductGenerator5\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5> ParamType;\n\n  CartesianProductGenerator5(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}\n  virtual ~CartesianProductGenerator5() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      
const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current5_;\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from 
different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_, *current5_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th 
range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator5::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator5& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n};  // class CartesianProductGenerator5\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6>\nclass CartesianProductGenerator6\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5,\n        T6> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6> ParamType;\n\n  CartesianProductGenerator6(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,\n      const ParamGenerator<T6>& g6)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}\n  virtual ~CartesianProductGenerator6() {}\n\n  virtual 
ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5,\n      const ParamGenerator<T6>& g6,\n      const typename ParamGenerator<T6>::iterator& current6)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),\n          begin6_(g6.begin()), end6_(g6.end()), current6_(current6)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current6_;\n      if 
(current6_ == end6_) {\n        current6_ = begin6_;\n        ++current5_;\n      }\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. 
That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_ &&\n          current6_ == typed_other->current6_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_),\n        begin6_(other.begin6_),\n        end6_(other.end6_),\n        current6_(other.current6_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_, *current5_, *current6_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_ ||\n          current6_ == end6_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ 
is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    const typename ParamGenerator<T6>::iterator begin6_;\n    const typename ParamGenerator<T6>::iterator end6_;\n    typename ParamGenerator<T6>::iterator current6_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator6::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator6& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n  const ParamGenerator<T6> g6_;\n};  // class CartesianProductGenerator6\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7>\nclass CartesianProductGenerator7\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,\n        T7> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;\n\n  CartesianProductGenerator7(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,\n 
     const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}\n  virtual ~CartesianProductGenerator7() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,\n        g7_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5,\n      const ParamGenerator<T6>& g6,\n      const typename ParamGenerator<T6>::iterator& current6,\n      const ParamGenerator<T7>& g7,\n      const typename ParamGenerator<T7>::iterator& current7)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),\n          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),\n          begin7_(g7.begin()), end7_(g7.end()), 
current7_(current7)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current7_;\n      if (current7_ == end7_) {\n        current7_ = begin7_;\n        ++current6_;\n      }\n      if (current6_ == end6_) {\n        current6_ = begin6_;\n        ++current5_;\n      }\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. 
That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_ &&\n          current6_ == typed_other->current6_ &&\n          current7_ == typed_other->current7_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_),\n        begin6_(other.begin6_),\n        end6_(other.end6_),\n        current6_(other.current6_),\n        begin7_(other.begin7_),\n        end7_(other.end7_),\n        current7_(other.current7_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_, *current5_, *current6_, *current7_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_ ||\n          current6_ == end6_ ||\n          current7_ == end7_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void 
operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    const typename ParamGenerator<T6>::iterator begin6_;\n    const typename ParamGenerator<T6>::iterator end6_;\n    typename ParamGenerator<T6>::iterator current6_;\n    const typename ParamGenerator<T7>::iterator begin7_;\n    const typename ParamGenerator<T7>::iterator end7_;\n    typename ParamGenerator<T7>::iterator current7_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator7::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator7& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n  const ParamGenerator<T6> g6_;\n  const ParamGenerator<T7> g7_;\n};  // class CartesianProductGenerator7\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename 
T8>\nclass CartesianProductGenerator8\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,\n        T7, T8> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;\n\n  CartesianProductGenerator8(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,\n      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,\n      const ParamGenerator<T8>& g8)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),\n          g8_(g8) {}\n  virtual ~CartesianProductGenerator8() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,\n        g7_.begin(), g8_, g8_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,\n        g8_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5,\n      const ParamGenerator<T6>& g6,\n      const typename ParamGenerator<T6>::iterator& current6,\n      const ParamGenerator<T7>& g7,\n      const typename ParamGenerator<T7>::iterator& 
current7,\n      const ParamGenerator<T8>& g8,\n      const typename ParamGenerator<T8>::iterator& current8)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),\n          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),\n          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),\n          begin8_(g8.begin()), end8_(g8.end()), current8_(current8)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current8_;\n      if (current8_ == end8_) {\n        current8_ = begin8_;\n        ++current7_;\n      }\n      if (current7_ == end7_) {\n        current7_ = begin7_;\n        ++current6_;\n      }\n      if (current6_ == end6_) {\n        current6_ = begin6_;\n        ++current5_;\n      }\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const 
ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_ &&\n          current6_ == typed_other->current6_ &&\n          current7_ == typed_other->current7_ &&\n          current8_ == typed_other->current8_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_),\n        begin6_(other.begin6_),\n        end6_(other.end6_),\n        current6_(other.current6_),\n        begin7_(other.begin7_),\n        end7_(other.end7_),\n        current7_(other.current7_),\n        begin8_(other.begin8_),\n        end8_(other.end8_),\n        current8_(other.current8_) {\n      
ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_, *current5_, *current6_, *current7_, *current8_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_ ||\n          current6_ == end6_ ||\n          current7_ == end7_ ||\n          current8_ == end8_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    const typename ParamGenerator<T6>::iterator begin6_;\n    const typename ParamGenerator<T6>::iterator end6_;\n    typename ParamGenerator<T6>::iterator current6_;\n    
const typename ParamGenerator<T7>::iterator begin7_;\n    const typename ParamGenerator<T7>::iterator end7_;\n    typename ParamGenerator<T7>::iterator current7_;\n    const typename ParamGenerator<T8>::iterator begin8_;\n    const typename ParamGenerator<T8>::iterator end8_;\n    typename ParamGenerator<T8>::iterator current8_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator8::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator8& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n  const ParamGenerator<T6> g6_;\n  const ParamGenerator<T7> g7_;\n  const ParamGenerator<T8> g8_;\n};  // class CartesianProductGenerator8\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9>\nclass CartesianProductGenerator9\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,\n        T7, T8, T9> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;\n\n  CartesianProductGenerator9(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,\n      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,\n      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),\n          g9_(g9) {}\n  virtual ~CartesianProductGenerator9() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,\n        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());\n  }\n  virtual 
ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,\n        g8_.end(), g9_, g9_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5,\n      const ParamGenerator<T6>& g6,\n      const typename ParamGenerator<T6>::iterator& current6,\n      const ParamGenerator<T7>& g7,\n      const typename ParamGenerator<T7>::iterator& current7,\n      const ParamGenerator<T8>& g8,\n      const typename ParamGenerator<T8>::iterator& current8,\n      const ParamGenerator<T9>& g9,\n      const typename ParamGenerator<T9>::iterator& current9)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),\n          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),\n          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),\n          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),\n          begin9_(g9.begin()), end9_(g9.end()), current9_(current9)    {\n      ComputeCurrentValue();\n    }\n    virtual 
~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current9_;\n      if (current9_ == end9_) {\n        current9_ = begin9_;\n        ++current8_;\n      }\n      if (current8_ == end8_) {\n        current8_ = begin8_;\n        ++current7_;\n      }\n      if (current7_ == end7_) {\n        current7_ = begin7_;\n        ++current6_;\n      }\n      if (current6_ == end6_) {\n        current6_ = begin6_;\n        ++current5_;\n      }\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. 
That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_ &&\n          current6_ == typed_other->current6_ &&\n          current7_ == typed_other->current7_ &&\n          current8_ == typed_other->current8_ &&\n          current9_ == typed_other->current9_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_),\n        begin6_(other.begin6_),\n        end6_(other.end6_),\n        current6_(other.current6_),\n        begin7_(other.begin7_),\n        end7_(other.end7_),\n        current7_(other.current7_),\n        begin8_(other.begin8_),\n        end8_(other.end8_),\n        current8_(other.current8_),\n        begin9_(other.begin9_),\n        end9_(other.end9_),\n        current9_(other.current9_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, *current2_, *current3_,\n            *current4_, *current5_, *current6_, *current7_, *current8_,\n            *current9_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the 
end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_ ||\n          current6_ == end6_ ||\n          current7_ == end7_ ||\n          current8_ == end8_ ||\n          current9_ == end9_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    const typename ParamGenerator<T6>::iterator begin6_;\n    const typename ParamGenerator<T6>::iterator end6_;\n    typename ParamGenerator<T6>::iterator current6_;\n    const typename ParamGenerator<T7>::iterator begin7_;\n    const typename ParamGenerator<T7>::iterator end7_;\n    typename ParamGenerator<T7>::iterator current7_;\n    const typename ParamGenerator<T8>::iterator begin8_;\n    const typename ParamGenerator<T8>::iterator end8_;\n    typename ParamGenerator<T8>::iterator current8_;\n    const typename 
ParamGenerator<T9>::iterator begin9_;\n    const typename ParamGenerator<T9>::iterator end9_;\n    typename ParamGenerator<T9>::iterator current9_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator9::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator9& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n  const ParamGenerator<T6> g6_;\n  const ParamGenerator<T7> g7_;\n  const ParamGenerator<T8> g8_;\n  const ParamGenerator<T9> g9_;\n};  // class CartesianProductGenerator9\n\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10>\nclass CartesianProductGenerator10\n    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,\n        T7, T8, T9, T10> > {\n public:\n  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;\n\n  CartesianProductGenerator10(const ParamGenerator<T1>& g1,\n      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,\n      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,\n      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,\n      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,\n      const ParamGenerator<T10>& g10)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),\n          g9_(g9), g10_(g10) {}\n  virtual ~CartesianProductGenerator10() {}\n\n  virtual ParamIteratorInterface<ParamType>* Begin() const {\n    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,\n        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,\n        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());\n  }\n  virtual ParamIteratorInterface<ParamType>* End() const {\n    return new Iterator(this, g1_, 
g1_.end(), g2_, g2_.end(), g3_, g3_.end(),\n        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,\n        g8_.end(), g9_, g9_.end(), g10_, g10_.end());\n  }\n\n private:\n  class Iterator : public ParamIteratorInterface<ParamType> {\n   public:\n    Iterator(const ParamGeneratorInterface<ParamType>* base,\n      const ParamGenerator<T1>& g1,\n      const typename ParamGenerator<T1>::iterator& current1,\n      const ParamGenerator<T2>& g2,\n      const typename ParamGenerator<T2>::iterator& current2,\n      const ParamGenerator<T3>& g3,\n      const typename ParamGenerator<T3>::iterator& current3,\n      const ParamGenerator<T4>& g4,\n      const typename ParamGenerator<T4>::iterator& current4,\n      const ParamGenerator<T5>& g5,\n      const typename ParamGenerator<T5>::iterator& current5,\n      const ParamGenerator<T6>& g6,\n      const typename ParamGenerator<T6>::iterator& current6,\n      const ParamGenerator<T7>& g7,\n      const typename ParamGenerator<T7>::iterator& current7,\n      const ParamGenerator<T8>& g8,\n      const typename ParamGenerator<T8>::iterator& current8,\n      const ParamGenerator<T9>& g9,\n      const typename ParamGenerator<T9>::iterator& current9,\n      const ParamGenerator<T10>& g10,\n      const typename ParamGenerator<T10>::iterator& current10)\n        : base_(base),\n          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),\n          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),\n          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),\n          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),\n          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),\n          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),\n          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),\n          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),\n          begin9_(g9.begin()), end9_(g9.end()), current9_(current9),\n          
begin10_(g10.begin()), end10_(g10.end()), current10_(current10)    {\n      ComputeCurrentValue();\n    }\n    virtual ~Iterator() {}\n\n    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {\n      return base_;\n    }\n    // Advance should not be called on beyond-of-range iterators\n    // so no component iterators must be beyond end of range, either.\n    virtual void Advance() {\n      assert(!AtEnd());\n      ++current10_;\n      if (current10_ == end10_) {\n        current10_ = begin10_;\n        ++current9_;\n      }\n      if (current9_ == end9_) {\n        current9_ = begin9_;\n        ++current8_;\n      }\n      if (current8_ == end8_) {\n        current8_ = begin8_;\n        ++current7_;\n      }\n      if (current7_ == end7_) {\n        current7_ = begin7_;\n        ++current6_;\n      }\n      if (current6_ == end6_) {\n        current6_ = begin6_;\n        ++current5_;\n      }\n      if (current5_ == end5_) {\n        current5_ = begin5_;\n        ++current4_;\n      }\n      if (current4_ == end4_) {\n        current4_ = begin4_;\n        ++current3_;\n      }\n      if (current3_ == end3_) {\n        current3_ = begin3_;\n        ++current2_;\n      }\n      if (current2_ == end2_) {\n        current2_ = begin2_;\n        ++current1_;\n      }\n      ComputeCurrentValue();\n    }\n    virtual ParamIteratorInterface<ParamType>* Clone() const {\n      return new Iterator(*this);\n    }\n    virtual const ParamType* Current() const { return &current_value_; }\n    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {\n      // Having the same base generator guarantees that the other\n      // iterator is of the same type and we can downcast.\n      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\n          << \"The program attempted to compare iterators \"\n          << \"from different generators.\" << std::endl;\n      const Iterator* typed_other =\n          CheckedDowncastToActualType<const 
Iterator>(&other);\n      // We must report iterators equal if they both point beyond their\n      // respective ranges. That can happen in a variety of fashions,\n      // so we have to consult AtEnd().\n      return (AtEnd() && typed_other->AtEnd()) ||\n         (\n          current1_ == typed_other->current1_ &&\n          current2_ == typed_other->current2_ &&\n          current3_ == typed_other->current3_ &&\n          current4_ == typed_other->current4_ &&\n          current5_ == typed_other->current5_ &&\n          current6_ == typed_other->current6_ &&\n          current7_ == typed_other->current7_ &&\n          current8_ == typed_other->current8_ &&\n          current9_ == typed_other->current9_ &&\n          current10_ == typed_other->current10_);\n    }\n\n   private:\n    Iterator(const Iterator& other)\n        : base_(other.base_),\n        begin1_(other.begin1_),\n        end1_(other.end1_),\n        current1_(other.current1_),\n        begin2_(other.begin2_),\n        end2_(other.end2_),\n        current2_(other.current2_),\n        begin3_(other.begin3_),\n        end3_(other.end3_),\n        current3_(other.current3_),\n        begin4_(other.begin4_),\n        end4_(other.end4_),\n        current4_(other.current4_),\n        begin5_(other.begin5_),\n        end5_(other.end5_),\n        current5_(other.current5_),\n        begin6_(other.begin6_),\n        end6_(other.end6_),\n        current6_(other.current6_),\n        begin7_(other.begin7_),\n        end7_(other.end7_),\n        current7_(other.current7_),\n        begin8_(other.begin8_),\n        end8_(other.end8_),\n        current8_(other.current8_),\n        begin9_(other.begin9_),\n        end9_(other.end9_),\n        current9_(other.current9_),\n        begin10_(other.begin10_),\n        end10_(other.end10_),\n        current10_(other.current10_) {\n      ComputeCurrentValue();\n    }\n\n    void ComputeCurrentValue() {\n      if (!AtEnd())\n        current_value_ = ParamType(*current1_, 
*current2_, *current3_,\n            *current4_, *current5_, *current6_, *current7_, *current8_,\n            *current9_, *current10_);\n    }\n    bool AtEnd() const {\n      // We must report iterator past the end of the range when either of the\n      // component iterators has reached the end of its range.\n      return\n          current1_ == end1_ ||\n          current2_ == end2_ ||\n          current3_ == end3_ ||\n          current4_ == end4_ ||\n          current5_ == end5_ ||\n          current6_ == end6_ ||\n          current7_ == end7_ ||\n          current8_ == end8_ ||\n          current9_ == end9_ ||\n          current10_ == end10_;\n    }\n\n    // No implementation - assignment is unsupported.\n    void operator=(const Iterator& other);\n\n    const ParamGeneratorInterface<ParamType>* const base_;\n    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.\n    // current[i]_ is the actual traversing iterator.\n    const typename ParamGenerator<T1>::iterator begin1_;\n    const typename ParamGenerator<T1>::iterator end1_;\n    typename ParamGenerator<T1>::iterator current1_;\n    const typename ParamGenerator<T2>::iterator begin2_;\n    const typename ParamGenerator<T2>::iterator end2_;\n    typename ParamGenerator<T2>::iterator current2_;\n    const typename ParamGenerator<T3>::iterator begin3_;\n    const typename ParamGenerator<T3>::iterator end3_;\n    typename ParamGenerator<T3>::iterator current3_;\n    const typename ParamGenerator<T4>::iterator begin4_;\n    const typename ParamGenerator<T4>::iterator end4_;\n    typename ParamGenerator<T4>::iterator current4_;\n    const typename ParamGenerator<T5>::iterator begin5_;\n    const typename ParamGenerator<T5>::iterator end5_;\n    typename ParamGenerator<T5>::iterator current5_;\n    const typename ParamGenerator<T6>::iterator begin6_;\n    const typename ParamGenerator<T6>::iterator end6_;\n    typename ParamGenerator<T6>::iterator current6_;\n    const typename 
ParamGenerator<T7>::iterator begin7_;\n    const typename ParamGenerator<T7>::iterator end7_;\n    typename ParamGenerator<T7>::iterator current7_;\n    const typename ParamGenerator<T8>::iterator begin8_;\n    const typename ParamGenerator<T8>::iterator end8_;\n    typename ParamGenerator<T8>::iterator current8_;\n    const typename ParamGenerator<T9>::iterator begin9_;\n    const typename ParamGenerator<T9>::iterator end9_;\n    typename ParamGenerator<T9>::iterator current9_;\n    const typename ParamGenerator<T10>::iterator begin10_;\n    const typename ParamGenerator<T10>::iterator end10_;\n    typename ParamGenerator<T10>::iterator current10_;\n    ParamType current_value_;\n  };  // class CartesianProductGenerator10::Iterator\n\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductGenerator10& other);\n\n  const ParamGenerator<T1> g1_;\n  const ParamGenerator<T2> g2_;\n  const ParamGenerator<T3> g3_;\n  const ParamGenerator<T4> g4_;\n  const ParamGenerator<T5> g5_;\n  const ParamGenerator<T6> g6_;\n  const ParamGenerator<T7> g7_;\n  const ParamGenerator<T8> g8_;\n  const ParamGenerator<T9> g9_;\n  const ParamGenerator<T10> g10_;\n};  // class CartesianProductGenerator10\n\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Helper classes providing Combine() with polymorphic features. 
They allow\n// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is\n// convertible to U.\n//\ntemplate <class Generator1, class Generator2>\nclass CartesianProductHolder2 {\n public:\nCartesianProductHolder2(const Generator1& g1, const Generator2& g2)\n      : g1_(g1), g2_(g2) {}\n  template <typename T1, typename T2>\n  operator ParamGenerator< ::testing::tuple<T1, T2> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2> >(\n        new CartesianProductGenerator2<T1, T2>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder2& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n};  // class CartesianProductHolder2\n\ntemplate <class Generator1, class Generator2, class Generator3>\nclass CartesianProductHolder3 {\n public:\nCartesianProductHolder3(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3)\n      : g1_(g1), g2_(g2), g3_(g3) {}\n  template <typename T1, typename T2, typename T3>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3> >(\n        new CartesianProductGenerator3<T1, T2, T3>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder3& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n};  // class CartesianProductHolder3\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4>\nclass CartesianProductHolder4 {\n public:\nCartesianProductHolder4(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}\n  template 
<typename T1, typename T2, typename T3, typename T4>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >(\n        new CartesianProductGenerator4<T1, T2, T3, T4>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder4& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n};  // class CartesianProductHolder4\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5>\nclass CartesianProductHolder5 {\n public:\nCartesianProductHolder5(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >(\n        new CartesianProductGenerator5<T1, T2, T3, T4, T5>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder5& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const Generator5 g5_;\n};  // class CartesianProductHolder5\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5, class 
Generator6>\nclass CartesianProductHolder6 {\n public:\nCartesianProductHolder6(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5,\n    const Generator6& g6)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5,\n      typename T6>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >(\n        new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_),\n        static_cast<ParamGenerator<T6> >(g6_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder6& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const Generator5 g5_;\n  const Generator6 g6_;\n};  // class CartesianProductHolder6\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5, class Generator6, class Generator7>\nclass CartesianProductHolder7 {\n public:\nCartesianProductHolder7(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5,\n    const Generator6& g6, const Generator7& g7)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5,\n      typename T6, typename T7>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6,\n      T7> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> >(\n        new CartesianProductGenerator7<T1, T2, T3, T4, T5, 
T6, T7>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_),\n        static_cast<ParamGenerator<T6> >(g6_),\n        static_cast<ParamGenerator<T7> >(g7_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder7& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const Generator5 g5_;\n  const Generator6 g6_;\n  const Generator7 g7_;\n};  // class CartesianProductHolder7\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5, class Generator6, class Generator7,\n    class Generator8>\nclass CartesianProductHolder8 {\n public:\nCartesianProductHolder8(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5,\n    const Generator6& g6, const Generator7& g7, const Generator8& g8)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),\n          g8_(g8) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5,\n      typename T6, typename T7, typename T8>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7,\n      T8> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(\n        new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_),\n        static_cast<ParamGenerator<T6> >(g6_),\n        static_cast<ParamGenerator<T7> >(g7_),\n        static_cast<ParamGenerator<T8> >(g8_)));\n  }\n\n private:\n  // No 
implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder8& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const Generator5 g5_;\n  const Generator6 g6_;\n  const Generator7 g7_;\n  const Generator8 g8_;\n};  // class CartesianProductHolder8\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5, class Generator6, class Generator7,\n    class Generator8, class Generator9>\nclass CartesianProductHolder9 {\n public:\nCartesianProductHolder9(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5,\n    const Generator6& g6, const Generator7& g7, const Generator8& g8,\n    const Generator9& g9)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),\n          g9_(g9) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5,\n      typename T6, typename T7, typename T8, typename T9>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,\n      T9> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,\n        T9> >(\n        new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_),\n        static_cast<ParamGenerator<T6> >(g6_),\n        static_cast<ParamGenerator<T7> >(g7_),\n        static_cast<ParamGenerator<T8> >(g8_),\n        static_cast<ParamGenerator<T9> >(g9_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder9& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const 
Generator5 g5_;\n  const Generator6 g6_;\n  const Generator7 g7_;\n  const Generator8 g8_;\n  const Generator9 g9_;\n};  // class CartesianProductHolder9\n\ntemplate <class Generator1, class Generator2, class Generator3,\n    class Generator4, class Generator5, class Generator6, class Generator7,\n    class Generator8, class Generator9, class Generator10>\nclass CartesianProductHolder10 {\n public:\nCartesianProductHolder10(const Generator1& g1, const Generator2& g2,\n    const Generator3& g3, const Generator4& g4, const Generator5& g5,\n    const Generator6& g6, const Generator7& g7, const Generator8& g8,\n    const Generator9& g9, const Generator10& g10)\n      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),\n          g9_(g9), g10_(g10) {}\n  template <typename T1, typename T2, typename T3, typename T4, typename T5,\n      typename T6, typename T7, typename T8, typename T9, typename T10>\n  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,\n      T10> >() const {\n    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,\n        T10> >(\n        new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,\n            T10>(\n        static_cast<ParamGenerator<T1> >(g1_),\n        static_cast<ParamGenerator<T2> >(g2_),\n        static_cast<ParamGenerator<T3> >(g3_),\n        static_cast<ParamGenerator<T4> >(g4_),\n        static_cast<ParamGenerator<T5> >(g5_),\n        static_cast<ParamGenerator<T6> >(g6_),\n        static_cast<ParamGenerator<T7> >(g7_),\n        static_cast<ParamGenerator<T8> >(g8_),\n        static_cast<ParamGenerator<T9> >(g9_),\n        static_cast<ParamGenerator<T10> >(g10_)));\n  }\n\n private:\n  // No implementation - assignment is unsupported.\n  void operator=(const CartesianProductHolder10& other);\n\n  const Generator1 g1_;\n  const Generator2 g2_;\n  const Generator3 g3_;\n  const Generator4 g4_;\n  const Generator5 g5_;\n  const Generator6 g6_;\n  
const Generator7 g7_;\n  const Generator8 g8_;\n  const Generator9 g9_;\n  const Generator10 g10_;\n};  // class CartesianProductHolder10\n\n# endif  // GTEST_HAS_COMBINE\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  //  GTEST_HAS_PARAM_TEST\n\n#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_\n\n#if GTEST_HAS_PARAM_TEST\n\nnamespace testing {\n\n// Functions producing parameter generators.\n//\n// Google Test uses these generators to produce parameters for value-\n// parameterized tests. When a parameterized test case is instantiated\n// with a particular generator, Google Test creates and runs tests\n// for each element in the sequence produced by the generator.\n//\n// In the following sample, tests from test case FooTest are instantiated\n// each three times with parameter values 3, 5, and 8:\n//\n// class FooTest : public TestWithParam<int> { ... };\n//\n// TEST_P(FooTest, TestThis) {\n// }\n// TEST_P(FooTest, TestThat) {\n// }\n// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));\n//\n\n// Range() returns generators providing sequences of values in a range.\n//\n// Synopsis:\n// Range(start, end)\n//   - returns a generator producing a sequence of values {start, start+1,\n//     start+2, ..., }.\n// Range(start, end, step)\n//   - returns a generator producing a sequence of values {start, start+step,\n//     start+step+step, ..., }.\n// Notes:\n//   * The generated sequences never include end. For example, Range(1, 5)\n//     returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)\n//     returns a generator producing {1, 3, 5, 7}.\n//   * start and end must have the same type. 
That type may be any integral or\n//     floating-point type or a user defined type satisfying these conditions:\n//     * It must be assignable (have operator=() defined).\n//     * It must have operator+() (operator+(int-compatible type) for\n//       two-operand version).\n//     * It must have operator<() defined.\n//     Elements in the resulting sequences will also have that type.\n//   * Condition start < end must be satisfied in order for resulting sequences\n//     to contain any elements.\n//\ntemplate <typename T, typename IncrementT>\ninternal::ParamGenerator<T> Range(T start, T end, IncrementT step) {\n  return internal::ParamGenerator<T>(\n      new internal::RangeGenerator<T, IncrementT>(start, end, step));\n}\n\ntemplate <typename T>\ninternal::ParamGenerator<T> Range(T start, T end) {\n  return Range(start, end, 1);\n}\n\n// ValuesIn() function allows generation of tests with parameters coming from\n// a container.\n//\n// Synopsis:\n// ValuesIn(const T (&array)[N])\n//   - returns a generator producing sequences with elements from\n//     a C-style array.\n// ValuesIn(const Container& container)\n//   - returns a generator producing sequences with elements from\n//     an STL-style container.\n// ValuesIn(Iterator begin, Iterator end)\n//   - returns a generator producing sequences with elements from\n//     a range [begin, end) defined by a pair of STL-style iterators. 
These\n//     iterators can also be plain C pointers.\n//\n// Please note that ValuesIn copies the values from the containers\n// passed in and keeps them to generate tests in RUN_ALL_TESTS().\n//\n// Examples:\n//\n// This instantiates tests from test case StringTest\n// each with C-string values of \"foo\", \"bar\", and \"baz\":\n//\n// const char* strings[] = {\"foo\", \"bar\", \"baz\"};\n// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));\n//\n// This instantiates tests from test case StlStringTest\n// each with STL strings with values \"a\" and \"b\":\n//\n// ::std::vector< ::std::string> GetParameterStrings() {\n//   ::std::vector< ::std::string> v;\n//   v.push_back(\"a\");\n//   v.push_back(\"b\");\n//   return v;\n// }\n//\n// INSTANTIATE_TEST_CASE_P(CharSequence,\n//                         StlStringTest,\n//                         ValuesIn(GetParameterStrings()));\n//\n//\n// This will also instantiate tests from CharTest\n// each with parameter values 'a' and 'b':\n//\n// ::std::list<char> GetParameterChars() {\n//   ::std::list<char> list;\n//   list.push_back('a');\n//   list.push_back('b');\n//   return list;\n// }\n// ::std::list<char> l = GetParameterChars();\n// INSTANTIATE_TEST_CASE_P(CharSequence2,\n//                         CharTest,\n//                         ValuesIn(l.begin(), l.end()));\n//\ntemplate <typename ForwardIterator>\ninternal::ParamGenerator<\n  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>\nValuesIn(ForwardIterator begin, ForwardIterator end) {\n  typedef typename ::testing::internal::IteratorTraits<ForwardIterator>\n      ::value_type ParamType;\n  return internal::ParamGenerator<ParamType>(\n      new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));\n}\n\ntemplate <typename T, size_t N>\ninternal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {\n  return ValuesIn(array, array + N);\n}\n\ntemplate <class Container>\ninternal::ParamGenerator<typename 
Container::value_type> ValuesIn(\n    const Container& container) {\n  return ValuesIn(container.begin(), container.end());\n}\n\n// Values() allows generating tests from explicitly specified list of\n// parameters.\n//\n// Synopsis:\n// Values(T v1, T v2, ..., T vN)\n//   - returns a generator producing sequences with elements v1, v2, ..., vN.\n//\n// For example, this instantiates tests from test case BarTest each\n// with values \"one\", \"two\", and \"three\":\n//\n// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values(\"one\", \"two\", \"three\"));\n//\n// This instantiates tests from test case BazTest each with values 1, 2, 3.5.\n// The exact type of values will depend on the type of parameter in BazTest.\n//\n// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));\n//\n// Currently, Values() supports from 1 to 50 parameters.\n//\ntemplate <typename T1>\ninternal::ValueArray1<T1> Values(T1 v1) {\n  return internal::ValueArray1<T1>(v1);\n}\n\ntemplate <typename T1, typename T2>\ninternal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {\n  return internal::ValueArray2<T1, T2>(v1, v2);\n}\n\ntemplate <typename T1, typename T2, typename T3>\ninternal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {\n  return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4>\ninternal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {\n  return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5>\ninternal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5) {\n  return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6>\ninternal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,\n    T4 v4, T5 v5, T6 v6) {\n  return internal::ValueArray6<T1, T2, T3, 
T4, T5, T6>(v1, v2, v3, v4, v5, v6);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7>\ninternal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,\n    T4 v4, T5 v5, T6 v6, T7 v7) {\n  return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,\n      v6, v7);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8>\ninternal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {\n  return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,\n      v5, v6, v7, v8);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9>\ninternal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {\n  return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,\n      v4, v5, v6, v7, v8, v9);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10>\ninternal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,\n    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {\n  return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,\n      v2, v3, v4, v5, v6, v7, v8, v9, v10);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11>\ninternal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,\n    T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11) {\n  return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,\n      T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, 
v11);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12>\ninternal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n    T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12) {\n  return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13>\ninternal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,\n    T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13) {\n  return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14>\ninternal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {\n  return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,\n      v14);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15>\ninternal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15> Values(T1 v1, 
T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,\n    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {\n  return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,\n      v13, v14, v15);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16>\ninternal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16) {\n  return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,\n      v12, v13, v14, v15, v16);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17>\ninternal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17) {\n  return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,\n      v11, v12, v13, v14, v15, v16, v17);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18>\ninternal::ValueArray18<T1, 
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,\n    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18) {\n  return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,\n      v10, v11, v12, v13, v14, v15, v16, v17, v18);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19>\ninternal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,\n    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,\n    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {\n  return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,\n      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20>\ninternal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,\n    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {\n  return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, 
v5, v6, v7,\n      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21>\ninternal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,\n    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {\n  return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,\n      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22>\ninternal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,\n    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22) {\n  return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,\n      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,\n      v20, v21, v22);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, 
typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23>\ninternal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22, T23 v23) {\n  return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,\n      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,\n      v20, v21, v22, v23);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24>\ninternal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22, T23 v23, T24 v24) {\n  return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,\n      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,\n      v19, v20, v21, v22, v23, v24);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, 
typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25>\ninternal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,\n    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,\n    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,\n    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {\n  return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,\n      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,\n      v18, v19, v20, v21, v22, v23, v24, v25);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26>\ninternal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n    T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26) {\n  return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,\n      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, 
v26);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27>\ninternal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,\n    T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27) {\n  return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,\n      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28>\ninternal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,\n    T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28) {\n  return 
internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,\n      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,\n      v28);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29>\ninternal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28, T29 v29) {\n  return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,\n      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,\n      v27, v28, v29);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename 
T30>\ninternal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,\n    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,\n    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,\n    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {\n  return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,\n      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,\n      v26, v27, v28, v29, v30);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31>\ninternal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {\n  return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,\n      v11, v12, v13, v14, 
v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,\n      v25, v26, v27, v28, v29, v30, v31);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32>\ninternal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,\n    T32 v32) {\n  return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,\n      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,\n      v24, v25, v26, v27, v28, v29, v30, v31, v32);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33>\ninternal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, 
T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,\n    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,\n    T32 v32, T33 v33) {\n  return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,\n      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,\n      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34>\ninternal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,\n    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,\n    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,\n    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,\n    T31 v31, T32 v32, T33 v33, T34 v34) {\n  return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, 
T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,\n      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,\n      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35>\ninternal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,\n    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,\n    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,\n    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {\n  return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,\n      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,\n      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename 
T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36>\ninternal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,\n    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,\n    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,\n    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {\n  return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,\n      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,\n      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,\n      v34, v35, v36);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37>\ninternal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,\n    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, 
T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,\n    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,\n    T37 v37) {\n  return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,\n      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,\n      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,\n      v34, v35, v36, v37);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38>\ninternal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,\n    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,\n    T37 v37, T38 v38) {\n  return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n 
     T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,\n      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,\n      v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,\n      v33, v34, v35, v36, v37, v38);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39>\ninternal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,\n    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,\n    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,\n    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,\n    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,\n    T37 v37, T38 v38, T39 v39) {\n  return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,\n      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,\n      v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,\n      v32, v33, v34, v35, v36, v37, v38, v39);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename 
T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40>\ninternal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,\n    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,\n    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,\n    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,\n    T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,\n    T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {\n  return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,\n      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,\n      v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n 
   typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41>\ninternal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,\n    T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {\n  return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,\n      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,\n      v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42>\ninternal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, 
T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,\n    T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n    T42 v42) {\n  return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,\n      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,\n      v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,\n      v42);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43>\ninternal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,\n    T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, 
T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n    T42 v42, T43 v43) {\n  return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,\n      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,\n      v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,\n      v41, v42, v43);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44>\ninternal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,\n    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,\n    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,\n    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,\n    
T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,\n    T42 v42, T43 v43, T44 v44) {\n  return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,\n      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,\n      v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,\n      v40, v41, v42, v43, v44);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45>\ninternal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,\n    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,\n    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,\n    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,\n    T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,\n    T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {\n  return internal::ValueArray45<T1, T2, 
T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,\n      v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,\n      v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,\n      v39, v40, v41, v42, v43, v44, v45);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46>\ninternal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,\n    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,\n    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {\n  return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, 
T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,\n      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,\n      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,\n      v38, v39, v40, v41, v42, v43, v44, v45, v46);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47>\ninternal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,\n    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,\n    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,\n    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {\n  return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, 
T36, T37, T38, T39,\n      T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,\n      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,\n      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,\n      v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48>\ninternal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,\n    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,\n    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,\n    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,\n    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,\n    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,\n    T48 v48) {\n  return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n    
  T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,\n      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,\n      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,\n      v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49>\ninternal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,\n    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,\n    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,\n    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,\n    T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,\n    T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,\n    T47 v47, T48 v48, T49 v49) {\n  return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, 
T38, T39,\n      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,\n      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,\n      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,\n      v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);\n}\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T5,\n    typename T6, typename T7, typename T8, typename T9, typename T10,\n    typename T11, typename T12, typename T13, typename T14, typename T15,\n    typename T16, typename T17, typename T18, typename T19, typename T20,\n    typename T21, typename T22, typename T23, typename T24, typename T25,\n    typename T26, typename T27, typename T28, typename T29, typename T30,\n    typename T31, typename T32, typename T33, typename T34, typename T35,\n    typename T36, typename T37, typename T38, typename T39, typename T40,\n    typename T41, typename T42, typename T43, typename T44, typename T45,\n    typename T46, typename T47, typename T48, typename T49, typename T50>\ninternal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,\n    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,\n    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,\n    T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,\n    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,\n    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,\n    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,\n    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,\n    T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,\n    T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {\n  return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,\n      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,\n      T26, 
T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,\n      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,\n      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,\n      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,\n      v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,\n      v48, v49, v50);\n}\n\n// Bool() allows generating tests with parameters in a set of (false, true).\n//\n// Synopsis:\n// Bool()\n//   - returns a generator producing sequences with elements {false, true}.\n//\n// It is useful when testing code that depends on Boolean flags. Combinations\n// of multiple flags can be tested when several Bool()'s are combined using\n// Combine() function.\n//\n// In the following example all tests in the test case FlagDependentTest\n// will be instantiated twice with parameters false and true.\n//\n// class FlagDependentTest : public testing::TestWithParam<bool> {\n//   virtual void SetUp() {\n//     external_flag = GetParam();\n//   }\n// }\n// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());\n//\ninline internal::ParamGenerator<bool> Bool() {\n  return Values(false, true);\n}\n\n# if GTEST_HAS_COMBINE\n// Combine() allows the user to combine two or more sequences to produce\n// values of a Cartesian product of those sequences' elements.\n//\n// Synopsis:\n// Combine(gen1, gen2, ..., genN)\n//   - returns a generator producing sequences with elements coming from\n//     the Cartesian product of elements from the sequences generated by\n//     gen1, gen2, ..., genN. The sequence elements will have a type of\n//     tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types\n//     of elements from sequences produces by gen1, gen2, ..., genN.\n//\n// Combine can have up to 10 arguments. 
This number is currently limited\n// by the maximum number of elements in the tuple implementation used by Google\n// Test.\n//\n// Example:\n//\n// This will instantiate tests in test case AnimalTest each one with\n// the parameter values tuple(\"cat\", BLACK), tuple(\"cat\", WHITE),\n// tuple(\"dog\", BLACK), and tuple(\"dog\", WHITE):\n//\n// enum Color { BLACK, GRAY, WHITE };\n// class AnimalTest\n//     : public testing::TestWithParam<tuple<const char*, Color> > {...};\n//\n// TEST_P(AnimalTest, AnimalLooksNice) {...}\n//\n// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,\n//                         Combine(Values(\"cat\", \"dog\"),\n//                                 Values(BLACK, WHITE)));\n//\n// This will instantiate tests in FlagDependentTest with all variations of two\n// Boolean flags:\n//\n// class FlagDependentTest\n//     : public testing::TestWithParam<tuple<bool, bool> > {\n//   virtual void SetUp() {\n//     // Assigns external_flag_1 and external_flag_2 values from the tuple.\n//     tie(external_flag_1, external_flag_2) = GetParam();\n//   }\n// };\n//\n// TEST_P(FlagDependentTest, TestFeature1) {\n//   // Test your code using external_flag_1 and external_flag_2 here.\n// }\n// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,\n//                         Combine(Bool(), Bool()));\n//\ntemplate <typename Generator1, typename Generator2>\ninternal::CartesianProductHolder2<Generator1, Generator2> Combine(\n    const Generator1& g1, const Generator2& g2) {\n  return internal::CartesianProductHolder2<Generator1, Generator2>(\n      g1, g2);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3>\ninternal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3) {\n  return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(\n      g1, g2, g3);\n}\n\ntemplate <typename Generator1, typename Generator2, typename 
Generator3,\n    typename Generator4>\ninternal::CartesianProductHolder4<Generator1, Generator2, Generator3,\n    Generator4> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4) {\n  return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,\n      Generator4>(\n      g1, g2, g3, g4);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5>\ninternal::CartesianProductHolder5<Generator1, Generator2, Generator3,\n    Generator4, Generator5> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5) {\n  return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,\n      Generator4, Generator5>(\n      g1, g2, g3, g4, g5);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5, typename Generator6>\ninternal::CartesianProductHolder6<Generator1, Generator2, Generator3,\n    Generator4, Generator5, Generator6> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5, const Generator6& g6) {\n  return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,\n      Generator4, Generator5, Generator6>(\n      g1, g2, g3, g4, g5, g6);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5, typename Generator6,\n    typename Generator7>\ninternal::CartesianProductHolder7<Generator1, Generator2, Generator3,\n    Generator4, Generator5, Generator6, Generator7> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5, const Generator6& g6,\n        const Generator7& g7) {\n  return internal::CartesianProductHolder7<Generator1, Generator2, 
Generator3,\n      Generator4, Generator5, Generator6, Generator7>(\n      g1, g2, g3, g4, g5, g6, g7);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5, typename Generator6,\n    typename Generator7, typename Generator8>\ninternal::CartesianProductHolder8<Generator1, Generator2, Generator3,\n    Generator4, Generator5, Generator6, Generator7, Generator8> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5, const Generator6& g6,\n        const Generator7& g7, const Generator8& g8) {\n  return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,\n      Generator4, Generator5, Generator6, Generator7, Generator8>(\n      g1, g2, g3, g4, g5, g6, g7, g8);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5, typename Generator6,\n    typename Generator7, typename Generator8, typename Generator9>\ninternal::CartesianProductHolder9<Generator1, Generator2, Generator3,\n    Generator4, Generator5, Generator6, Generator7, Generator8,\n    Generator9> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5, const Generator6& g6,\n        const Generator7& g7, const Generator8& g8, const Generator9& g9) {\n  return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,\n      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(\n      g1, g2, g3, g4, g5, g6, g7, g8, g9);\n}\n\ntemplate <typename Generator1, typename Generator2, typename Generator3,\n    typename Generator4, typename Generator5, typename Generator6,\n    typename Generator7, typename Generator8, typename Generator9,\n    typename Generator10>\ninternal::CartesianProductHolder10<Generator1, Generator2, Generator3,\n    Generator4, Generator5, Generator6, 
Generator7, Generator8, Generator9,\n    Generator10> Combine(\n    const Generator1& g1, const Generator2& g2, const Generator3& g3,\n        const Generator4& g4, const Generator5& g5, const Generator6& g6,\n        const Generator7& g7, const Generator8& g8, const Generator9& g9,\n        const Generator10& g10) {\n  return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,\n      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,\n      Generator10>(\n      g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);\n}\n# endif  // GTEST_HAS_COMBINE\n\n\n\n# define TEST_P(test_case_name, test_name) \\\n  class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \\\n      : public test_case_name { \\\n   public: \\\n    GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \\\n    virtual void TestBody(); \\\n   private: \\\n    static int AddToRegistry() { \\\n      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \\\n          GetTestCasePatternHolder<test_case_name>(\\\n              #test_case_name, \\\n              ::testing::internal::CodeLocation(\\\n                  __FILE__, __LINE__))->AddTestPattern(\\\n                      #test_case_name, \\\n                      #test_name, \\\n                      new ::testing::internal::TestMetaFactory< \\\n                          GTEST_TEST_CLASS_NAME_(\\\n                              test_case_name, test_name)>()); \\\n      return 0; \\\n    } \\\n    static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \\\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(\\\n        GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \\\n  }; \\\n  int GTEST_TEST_CLASS_NAME_(test_case_name, \\\n                             test_name)::gtest_registering_dummy_ = \\\n      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \\\n  void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()\n\n// The optional last argument to INSTANTIATE_TEST_CASE_P allows the 
user\n// to specify a function or functor that generates custom test name suffixes\n// based on the test parameters. The function should accept one argument of\n// type testing::TestParamInfo<class ParamType>, and return std::string.\n//\n// testing::PrintToStringParamName is a builtin test suffix generator that\n// returns the value of testing::PrintToString(GetParam()). It does not work\n// for std::string or C strings.\n//\n// Note: test names must be non-empty, unique, and may only contain ASCII\n// alphanumeric characters or underscore.\n\n# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator, ...) \\\n  ::testing::internal::ParamGenerator<test_case_name::ParamType> \\\n      gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \\\n  ::std::string gtest_##prefix##test_case_name##_EvalGenerateName_( \\\n      const ::testing::TestParamInfo<test_case_name::ParamType>& info) { \\\n    return ::testing::internal::GetParamNameGen<test_case_name::ParamType> \\\n        (__VA_ARGS__)(info); \\\n  } \\\n  int gtest_##prefix##test_case_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \\\n      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\\\n          GetTestCasePatternHolder<test_case_name>(\\\n              #test_case_name, \\\n              ::testing::internal::CodeLocation(\\\n                  __FILE__, __LINE__))->AddTestCaseInstantiation(\\\n                      #prefix, \\\n                      &gtest_##prefix##test_case_name##_EvalGenerator_, \\\n                      &gtest_##prefix##test_case_name##_EvalGenerateName_, \\\n                      __FILE__, __LINE__)\n\n}  // namespace testing\n\n#endif  // GTEST_HAS_PARAM_TEST\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_\n// Copyright 2006, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// Google C++ Testing Framework definitions useful in production code.\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_\n#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_\n\n// When you need to test the private or protected members of a class,\n// use the FRIEND_TEST macro to declare your tests as friends of the\n// class.  For example:\n//\n// class MyClass {\n//  private:\n//   void MyMethod();\n//   FRIEND_TEST(MyClassTest, MyMethod);\n// };\n//\n// class MyClassTest : public testing::Test {\n//   // ...\n// };\n//\n// TEST_F(MyClassTest, MyMethod) {\n//   // Can call MyClass::MyMethod() here.\n// }\n\n#define FRIEND_TEST(test_case_name, test_name)\\\nfriend class test_case_name##_##test_name##_Test\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_PROD_H_\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: mheule@google.com (Markus Heule)\n//\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_\n#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_\n\n#include <iosfwd>\n#include <vector>\n\nnamespace testing {\n\n// A copyable object representing the result of a test part (i.e. an\n// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).\n//\n// Don't inherit from TestPartResult as its destructor is not virtual.\nclass GTEST_API_ TestPartResult {\n public:\n  // The possible outcomes of a test part (i.e. an assertion or an\n  // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).\n  enum Type {\n    kSuccess,          // Succeeded.\n    kNonFatalFailure,  // Failed but the test can continue.\n    kFatalFailure      // Failed and the test should be terminated.\n  };\n\n  // C'tor.  
TestPartResult does NOT have a default constructor.\n  // Always use this constructor (with parameters) to create a\n  // TestPartResult object.\n  TestPartResult(Type a_type,\n                 const char* a_file_name,\n                 int a_line_number,\n                 const char* a_message)\n      : type_(a_type),\n        file_name_(a_file_name == NULL ? \"\" : a_file_name),\n        line_number_(a_line_number),\n        summary_(ExtractSummary(a_message)),\n        message_(a_message) {\n  }\n\n  // Gets the outcome of the test part.\n  Type type() const { return type_; }\n\n  // Gets the name of the source file where the test part took place, or\n  // NULL if it's unknown.\n  const char* file_name() const {\n    return file_name_.empty() ? NULL : file_name_.c_str();\n  }\n\n  // Gets the line in the source file where the test part took place,\n  // or -1 if it's unknown.\n  int line_number() const { return line_number_; }\n\n  // Gets the summary of the failure message.\n  const char* summary() const { return summary_.c_str(); }\n\n  // Gets the message associated with the test part.\n  const char* message() const { return message_.c_str(); }\n\n  // Returns true iff the test part passed.\n  bool passed() const { return type_ == kSuccess; }\n\n  // Returns true iff the test part failed.\n  bool failed() const { return type_ != kSuccess; }\n\n  // Returns true iff the test part non-fatally failed.\n  bool nonfatally_failed() const { return type_ == kNonFatalFailure; }\n\n  // Returns true iff the test part fatally failed.\n  bool fatally_failed() const { return type_ == kFatalFailure; }\n\n private:\n  Type type_;\n\n  // Gets the summary of the failure message by omitting the stack\n  // trace in it.\n  static std::string ExtractSummary(const char* message);\n\n  // The name of the source file where the test part took place, or\n  // \"\" if the source file is unknown.\n  std::string file_name_;\n  // The line in the source file where the test part took 
place, or -1\n  // if the line number is unknown.\n  int line_number_;\n  std::string summary_;  // The test failure summary.\n  std::string message_;  // The test failure message.\n};\n\n// Prints a TestPartResult object.\nstd::ostream& operator<<(std::ostream& os, const TestPartResult& result);\n\n// An array of TestPartResult objects.\n//\n// Don't inherit from TestPartResultArray as its destructor is not\n// virtual.\nclass GTEST_API_ TestPartResultArray {\n public:\n  TestPartResultArray() {}\n\n  // Appends the given TestPartResult to the array.\n  void Append(const TestPartResult& result);\n\n  // Returns the TestPartResult at the given index (0-based).\n  const TestPartResult& GetTestPartResult(int index) const;\n\n  // Returns the number of TestPartResult objects in the array.\n  int size() const;\n\n private:\n  std::vector<TestPartResult> array_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);\n};\n\n// This interface knows how to report a test part result.\nclass TestPartResultReporterInterface {\n public:\n  virtual ~TestPartResultReporterInterface() {}\n\n  virtual void ReportTestPartResult(const TestPartResult& result) = 0;\n};\n\nnamespace internal {\n\n// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a\n// statement generates new fatal failures. To do so it registers itself as the\n// current test part result reporter. 
Besides checking if fatal failures were\n// reported, it only delegates the reporting to the former result reporter.\n// The original result reporter is restored in the destructor.\n// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\nclass GTEST_API_ HasNewFatalFailureHelper\n    : public TestPartResultReporterInterface {\n public:\n  HasNewFatalFailureHelper();\n  virtual ~HasNewFatalFailureHelper();\n  virtual void ReportTestPartResult(const TestPartResult& result);\n  bool has_new_fatal_failure() const { return has_new_fatal_failure_; }\n private:\n  bool has_new_fatal_failure_;\n  TestPartResultReporterInterface* original_reporter_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);\n};\n\n}  // namespace internal\n\n}  // namespace testing\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_\n// Copyright 2008 Google Inc.\n// All Rights Reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_\n#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_\n\n// This header implements typed tests and type-parameterized tests.\n\n// Typed (aka type-driven) tests repeat the same test for types in a\n// list.  You must know which types you want to test with when writing\n// typed tests. Here's how you do it:\n\n#if 0\n\n// First, define a fixture class template.  It should be parameterized\n// by a type.  Remember to derive it from testing::Test.\ntemplate <typename T>\nclass FooTest : public testing::Test {\n public:\n  ...\n  typedef std::list<T> List;\n  static T shared_;\n  T value_;\n};\n\n// Next, associate a list of types with the test case, which will be\n// repeated for each type in the list.  
The typedef is necessary for\n// the macro to parse correctly.\ntypedef testing::Types<char, int, unsigned int> MyTypes;\nTYPED_TEST_CASE(FooTest, MyTypes);\n\n// If the type list contains only one type, you can write that type\n// directly without Types<...>:\n//   TYPED_TEST_CASE(FooTest, int);\n\n// Then, use TYPED_TEST() instead of TEST_F() to define as many typed\n// tests for this test case as you want.\nTYPED_TEST(FooTest, DoesBlah) {\n  // Inside a test, refer to TypeParam to get the type parameter.\n  // Since we are inside a derived class template, C++ requires use to\n  // visit the members of FooTest via 'this'.\n  TypeParam n = this->value_;\n\n  // To visit static members of the fixture, add the TestFixture::\n  // prefix.\n  n += TestFixture::shared_;\n\n  // To refer to typedefs in the fixture, add the \"typename\n  // TestFixture::\" prefix.\n  typename TestFixture::List values;\n  values.push_back(n);\n  ...\n}\n\nTYPED_TEST(FooTest, HasPropertyA) { ... }\n\n#endif  // 0\n\n// Type-parameterized tests are abstract test patterns parameterized\n// by a type.  Compared with typed tests, type-parameterized tests\n// allow you to define the test pattern without knowing what the type\n// parameters are.  The defined pattern can be instantiated with\n// different types any number of times, in any number of translation\n// units.\n//\n// If you are designing an interface or concept, you can define a\n// suite of type-parameterized tests to verify properties that any\n// valid implementation of the interface/concept should have.  Then,\n// each implementation can easily instantiate the test suite to verify\n// that it conforms to the requirements, without having to write\n// similar tests repeatedly.  Here's an example:\n\n#if 0\n\n// First, define a fixture class template.  It should be parameterized\n// by a type.  
Remember to derive it from testing::Test.\ntemplate <typename T>\nclass FooTest : public testing::Test {\n  ...\n};\n\n// Next, declare that you will define a type-parameterized test case\n// (the _P suffix is for \"parameterized\" or \"pattern\", whichever you\n// prefer):\nTYPED_TEST_CASE_P(FooTest);\n\n// Then, use TYPED_TEST_P() to define as many type-parameterized tests\n// for this type-parameterized test case as you want.\nTYPED_TEST_P(FooTest, DoesBlah) {\n  // Inside a test, refer to TypeParam to get the type parameter.\n  TypeParam n = 0;\n  ...\n}\n\nTYPED_TEST_P(FooTest, HasPropertyA) { ... }\n\n// Now the tricky part: you need to register all test patterns before\n// you can instantiate them.  The first argument of the macro is the\n// test case name; the rest are the names of the tests in this test\n// case.\nREGISTER_TYPED_TEST_CASE_P(FooTest,\n                           DoesBlah, HasPropertyA);\n\n// Finally, you are free to instantiate the pattern with the types you\n// want.  If you put the above code in a header file, you can #include\n// it in multiple C++ source files and instantiate it multiple times.\n//\n// To distinguish different instances of the pattern, the first\n// argument to the INSTANTIATE_* macro is a prefix that will be added\n// to the actual test case name.  
Remember to pick unique prefixes for\n// different instances.\ntypedef testing::Types<char, int, unsigned int> MyTypes;\nINSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);\n\n// If the type list contains only one type, you can write that type\n// directly without Types<...>:\n//   INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);\n\n#endif  // 0\n\n\n// Implements typed tests.\n\n#if GTEST_HAS_TYPED_TEST\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Expands to the name of the typedef for the type parameters of the\n// given test case.\n# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_\n\n// The 'Types' template argument below must have spaces around it\n// since some compilers may choke on '>>' when passing a template\n// instance (e.g. Types<int>)\n# define TYPED_TEST_CASE(CaseName, Types) \\\n  typedef ::testing::internal::TypeList< Types >::type \\\n      GTEST_TYPE_PARAMS_(CaseName)\n\n# define TYPED_TEST(CaseName, TestName) \\\n  template <typename gtest_TypeParam_> \\\n  class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \\\n      : public CaseName<gtest_TypeParam_> { \\\n   private: \\\n    typedef CaseName<gtest_TypeParam_> TestFixture; \\\n    typedef gtest_TypeParam_ TypeParam; \\\n    virtual void TestBody(); \\\n  }; \\\n  bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \\\n      ::testing::internal::TypeParameterizedTest< \\\n          CaseName, \\\n          ::testing::internal::TemplateSel< \\\n              GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \\\n          GTEST_TYPE_PARAMS_(CaseName)>::Register(\\\n              \"\", ::testing::internal::CodeLocation(__FILE__, __LINE__), \\\n              #CaseName, #TestName, 0); \\\n  template <typename gtest_TypeParam_> \\\n  void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()\n\n#endif  // GTEST_HAS_TYPED_TEST\n\n// Implements type-parameterized tests.\n\n#if GTEST_HAS_TYPED_TEST_P\n\n// INTERNAL 
IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Expands to the namespace name that the type-parameterized tests for\n// the given type-parameterized test case are defined in.  The exact\n// name of the namespace is subject to change without notice.\n# define GTEST_CASE_NAMESPACE_(TestCaseName) \\\n  gtest_case_##TestCaseName##_\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.\n//\n// Expands to the name of the variable used to remember the names of\n// the defined tests in the given test case.\n# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \\\n  gtest_typed_test_case_p_state_##TestCaseName##_\n\n// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.\n//\n// Expands to the name of the variable used to remember the names of\n// the registered tests in the given test case.\n# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \\\n  gtest_registered_test_names_##TestCaseName##_\n\n// The variables defined in the type-parameterized test macros are\n// static as typically these macros are used in a .h file that can be\n// #included in multiple translation units linked together.\n# define TYPED_TEST_CASE_P(CaseName) \\\n  static ::testing::internal::TypedTestCasePState \\\n      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)\n\n# define TYPED_TEST_P(CaseName, TestName) \\\n  namespace GTEST_CASE_NAMESPACE_(CaseName) { \\\n  template <typename gtest_TypeParam_> \\\n  class TestName : public CaseName<gtest_TypeParam_> { \\\n   private: \\\n    typedef CaseName<gtest_TypeParam_> TestFixture; \\\n    typedef gtest_TypeParam_ TypeParam; \\\n    virtual void TestBody(); \\\n  }; \\\n  static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \\\n      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\\\n          __FILE__, __LINE__, #CaseName, #TestName); \\\n  } \\\n  template <typename gtest_TypeParam_> \\\n  void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()\n\n# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) 
\\\n  namespace GTEST_CASE_NAMESPACE_(CaseName) { \\\n  typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \\\n  } \\\n  static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \\\n      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\\\n          __FILE__, __LINE__, #__VA_ARGS__)\n\n// The 'Types' template argument below must have spaces around it\n// since some compilers may choke on '>>' when passing a template\n// instance (e.g. Types<int>)\n# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \\\n  bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \\\n      ::testing::internal::TypeParameterizedTestCase<CaseName, \\\n          GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \\\n          ::testing::internal::TypeList< Types >::type>::Register(\\\n              #Prefix, \\\n              ::testing::internal::CodeLocation(__FILE__, __LINE__), \\\n              &GTEST_TYPED_TEST_CASE_P_STATE_(CaseName), \\\n              #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))\n\n#endif  // GTEST_HAS_TYPED_TEST_P\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_\n\n// Depending on the platform, different string classes are available.\n// On Linux, in addition to ::std::string, Google also makes use of\n// class ::string, which has the same interface as ::std::string, but\n// has a different implementation.\n//\n// You can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that\n// ::string is available AND is a distinct type to ::std::string, or\n// define it to 0 to indicate otherwise.\n//\n// If ::std::string and ::string are the same class on your platform\n// due to aliasing, you should define GTEST_HAS_GLOBAL_STRING to 0.\n//\n// If you do not define GTEST_HAS_GLOBAL_STRING, it is defined\n// heuristically.\n\nnamespace testing {\n\n// Declares the flags.\n\n// This flag temporary enables the disabled tests.\nGTEST_DECLARE_bool_(also_run_disabled_tests);\n\n// This flag brings the 
debugger on an assertion failure.\nGTEST_DECLARE_bool_(break_on_failure);\n\n// This flag controls whether Google Test catches all test-thrown exceptions\n// and logs them as failures.\nGTEST_DECLARE_bool_(catch_exceptions);\n\n// This flag enables using colors in terminal output. Available values are\n// \"yes\" to enable colors, \"no\" (disable colors), or \"auto\" (the default)\n// to let Google Test decide.\nGTEST_DECLARE_string_(color);\n\n// This flag sets up the filter to select by name using a glob pattern\n// the tests to run. If the filter is not given all tests are executed.\nGTEST_DECLARE_string_(filter);\n\n// This flag causes the Google Test to list tests. None of the tests listed\n// are actually run if the flag is provided.\nGTEST_DECLARE_bool_(list_tests);\n\n// This flag controls whether Google Test emits a detailed XML report to a file\n// in addition to its normal textual output.\nGTEST_DECLARE_string_(output);\n\n// This flags control whether Google Test prints the elapsed time for each\n// test.\nGTEST_DECLARE_bool_(print_time);\n\n// This flag specifies the random number seed.\nGTEST_DECLARE_int32_(random_seed);\n\n// This flag sets how many times the tests are repeated. The default value\n// is 1. 
If the value is -1 the tests are repeating forever.\nGTEST_DECLARE_int32_(repeat);\n\n// This flag controls whether Google Test includes Google Test internal\n// stack frames in failure stack traces.\nGTEST_DECLARE_bool_(show_internal_stack_frames);\n\n// When this flag is specified, tests' order is randomized on every iteration.\nGTEST_DECLARE_bool_(shuffle);\n\n// This flag specifies the maximum number of stack frames to be\n// printed in a failure message.\nGTEST_DECLARE_int32_(stack_trace_depth);\n\n// When this flag is specified, a failed assertion will throw an\n// exception if exceptions are enabled, or exit the program with a\n// non-zero code otherwise.\nGTEST_DECLARE_bool_(throw_on_failure);\n\n// When this flag is set with a \"host:port\" string, on supported\n// platforms test results are streamed to the specified port on\n// the specified host machine.\nGTEST_DECLARE_string_(stream_result_to);\n\n// The upper limit for valid stack trace depths.\nconst int kMaxStackTraceDepth = 100;\n\nnamespace internal {\n\nclass AssertHelper;\nclass DefaultGlobalTestPartResultReporter;\nclass ExecDeathTest;\nclass NoExecDeathTest;\nclass FinalSuccessChecker;\nclass GTestFlagSaver;\nclass StreamingListenerTest;\nclass TestResultAccessor;\nclass TestEventListenersAccessor;\nclass TestEventRepeater;\nclass UnitTestRecordPropertyTestHelper;\nclass WindowsDeathTest;\nclass UnitTestImpl* GetUnitTestImpl();\nvoid ReportFailureInUnknownLocation(TestPartResult::Type result_type,\n                                    const std::string& message);\n\n}  // namespace internal\n\n// The friend relationship of some of these classes is cyclic.\n// If we don't forward declare them the compiler might confuse the classes\n// in friendship clauses with same named classes on the scope.\nclass Test;\nclass TestCase;\nclass TestInfo;\nclass UnitTest;\n\n// A class for indicating whether an assertion was successful.  
When\n// the assertion wasn't successful, the AssertionResult object\n// remembers a non-empty message that describes how it failed.\n//\n// To create an instance of this class, use one of the factory functions\n// (AssertionSuccess() and AssertionFailure()).\n//\n// This class is useful for two purposes:\n//   1. Defining predicate functions to be used with Boolean test assertions\n//      EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts\n//   2. Defining predicate-format functions to be\n//      used with predicate assertions (ASSERT_PRED_FORMAT*, etc).\n//\n// For example, if you define IsEven predicate:\n//\n//   testing::AssertionResult IsEven(int n) {\n//     if ((n % 2) == 0)\n//       return testing::AssertionSuccess();\n//     else\n//       return testing::AssertionFailure() << n << \" is odd\";\n//   }\n//\n// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))\n// will print the message\n//\n//   Value of: IsEven(Fib(5))\n//     Actual: false (5 is odd)\n//   Expected: true\n//\n// instead of a more opaque\n//\n//   Value of: IsEven(Fib(5))\n//     Actual: false\n//   Expected: true\n//\n// in case IsEven is a simple Boolean predicate.\n//\n// If you expect your predicate to be reused and want to support informative\n// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up\n// about half as often as positive ones in our tests), supply messages for\n// both success and failure cases:\n//\n//   testing::AssertionResult IsEven(int n) {\n//     if ((n % 2) == 0)\n//       return testing::AssertionSuccess() << n << \" is even\";\n//     else\n//       return testing::AssertionFailure() << n << \" is odd\";\n//   }\n//\n// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print\n//\n//   Value of: IsEven(Fib(6))\n//     Actual: true (8 is even)\n//   Expected: false\n//\n// NB: Predicates that support negative Boolean assertions have reduced\n// performance in positive ones so be careful not to use them in tests\n// that have lots 
(tens of thousands) of positive Boolean assertions.\n//\n// To use this class with EXPECT_PRED_FORMAT assertions such as:\n//\n//   // Verifies that Foo() returns an even number.\n//   EXPECT_PRED_FORMAT1(IsEven, Foo());\n//\n// you need to define:\n//\n//   testing::AssertionResult IsEven(const char* expr, int n) {\n//     if ((n % 2) == 0)\n//       return testing::AssertionSuccess();\n//     else\n//       return testing::AssertionFailure()\n//         << \"Expected: \" << expr << \" is even\\n  Actual: it's \" << n;\n//   }\n//\n// If Foo() returns 5, you will see the following message:\n//\n//   Expected: Foo() is even\n//     Actual: it's 5\n//\nclass GTEST_API_ AssertionResult {\n public:\n  // Copy constructor.\n  // Used in EXPECT_TRUE/FALSE(assertion_result).\n  AssertionResult(const AssertionResult& other);\n\n  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)\n\n  // Used in the EXPECT_TRUE/FALSE(bool_expression).\n  //\n  // T must be contextually convertible to bool.\n  //\n  // The second parameter prevents this overload from being considered if\n  // the argument is implicitly convertible to AssertionResult. In that case\n  // we want AssertionResult's copy constructor to be used.\n  template <typename T>\n  explicit AssertionResult(\n      const T& success,\n      typename internal::EnableIf<\n          !internal::ImplicitlyConvertible<T, AssertionResult>::value>::type*\n          /*enabler*/ = NULL)\n      : success_(success) {}\n\n  GTEST_DISABLE_MSC_WARNINGS_POP_()\n\n  // Assignment operator.\n  AssertionResult& operator=(AssertionResult other) {\n    swap(other);\n    return *this;\n  }\n\n  // Returns true iff the assertion succeeded.\n  operator bool() const { return success_; }  // NOLINT\n\n  // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.\n  AssertionResult operator!() const;\n\n  // Returns the text streamed into this AssertionResult. 
Test assertions\n  // use it when they fail (i.e., the predicate's outcome doesn't match the\n  // assertion's expectation). When nothing has been streamed into the\n  // object, returns an empty string.\n  const char* message() const {\n    return message_.get() != NULL ?  message_->c_str() : \"\";\n  }\n  // TODO(vladl@google.com): Remove this after making sure no clients use it.\n  // Deprecated; please use message() instead.\n  const char* failure_message() const { return message(); }\n\n  // Streams a custom failure message into this object.\n  template <typename T> AssertionResult& operator<<(const T& value) {\n    AppendMessage(Message() << value);\n    return *this;\n  }\n\n  // Allows streaming basic output manipulators such as endl or flush into\n  // this object.\n  AssertionResult& operator<<(\n      ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {\n    AppendMessage(Message() << basic_manipulator);\n    return *this;\n  }\n\n private:\n  // Appends the contents of message to message_.\n  void AppendMessage(const Message& a_message) {\n    if (message_.get() == NULL)\n      message_.reset(new ::std::string);\n    message_->append(a_message.GetString().c_str());\n  }\n\n  // Swap the contents of this AssertionResult with other.\n  void swap(AssertionResult& other);\n\n  // Stores result of the assertion predicate.\n  bool success_;\n  // Stores the message describing the condition in case the expectation\n  // construct is not satisfied with the predicate's outcome.\n  // Referenced via a pointer to avoid taking too much stack frame space\n  // with test assertions.\n  internal::scoped_ptr< ::std::string> message_;\n};\n\n// Makes a successful assertion result.\nGTEST_API_ AssertionResult AssertionSuccess();\n\n// Makes a failed assertion result.\nGTEST_API_ AssertionResult AssertionFailure();\n\n// Makes a failed assertion result with the given failure message.\n// Deprecated; use AssertionFailure() << msg.\nGTEST_API_ AssertionResult 
AssertionFailure(const Message& msg);\n\n// The abstract class that all tests inherit from.\n//\n// In Google Test, a unit test program contains one or many TestCases, and\n// each TestCase contains one or many Tests.\n//\n// When you define a test using the TEST macro, you don't need to\n// explicitly derive from Test - the TEST macro automatically does\n// this for you.\n//\n// The only time you derive from Test is when defining a test fixture\n// to be used a TEST_F.  For example:\n//\n//   class FooTest : public testing::Test {\n//    protected:\n//     void SetUp() override { ... }\n//     void TearDown() override { ... }\n//     ...\n//   };\n//\n//   TEST_F(FooTest, Bar) { ... }\n//   TEST_F(FooTest, Baz) { ... }\n//\n// Test is not copyable.\nclass GTEST_API_ Test {\n public:\n  friend class TestInfo;\n\n  // Defines types for pointers to functions that set up and tear down\n  // a test case.\n  typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;\n  typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;\n\n  // The d'tor is virtual as we intend to inherit from Test.\n  virtual ~Test();\n\n  // Sets up the stuff shared by all tests in this test case.\n  //\n  // Google Test will call Foo::SetUpTestCase() before running the first\n  // test in test case Foo.  Hence a sub-class can define its own\n  // SetUpTestCase() method to shadow the one defined in the super\n  // class.\n  static void SetUpTestCase() {}\n\n  // Tears down the stuff shared by all tests in this test case.\n  //\n  // Google Test will call Foo::TearDownTestCase() after running the last\n  // test in test case Foo.  
Hence a sub-class can define its own\n  // TearDownTestCase() method to shadow the one defined in the super\n  // class.\n  static void TearDownTestCase() {}\n\n  // Returns true iff the current test has a fatal failure.\n  static bool HasFatalFailure();\n\n  // Returns true iff the current test has a non-fatal failure.\n  static bool HasNonfatalFailure();\n\n  // Returns true iff the current test has a (either fatal or\n  // non-fatal) failure.\n  static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }\n\n  // Logs a property for the current test, test case, or for the entire\n  // invocation of the test program when used outside of the context of a\n  // test case.  Only the last value for a given key is remembered.  These\n  // are public static so they can be called from utility functions that are\n  // not members of the test fixture.  Calls to RecordProperty made during\n  // lifespan of the test (from the moment its constructor starts to the\n  // moment its destructor finishes) will be output in XML as attributes of\n  // the <testcase> element.  Properties recorded from fixture's\n  // SetUpTestCase or TearDownTestCase are logged as attributes of the\n  // corresponding <testsuite> element.  
Calls to RecordProperty made in the\n  // global context (before or after invocation of RUN_ALL_TESTS and from\n  // SetUp/TearDown method of Environment objects registered with Google\n  // Test) will be output as attributes of the <testsuites> element.\n  static void RecordProperty(const std::string& key, const std::string& value);\n  static void RecordProperty(const std::string& key, int value);\n\n protected:\n  // Creates a Test object.\n  Test();\n\n  // Sets up the test fixture.\n  virtual void SetUp();\n\n  // Tears down the test fixture.\n  virtual void TearDown();\n\n private:\n  // Returns true iff the current test has the same fixture class as\n  // the first test in the current test case.\n  static bool HasSameFixtureClass();\n\n  // Runs the test after the test fixture has been set up.\n  //\n  // A sub-class must implement this to define the test logic.\n  //\n  // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.\n  // Instead, use the TEST or TEST_F macro.\n  virtual void TestBody() = 0;\n\n  // Sets up, executes, and tears down the test.\n  void Run();\n\n  // Deletes self.  We deliberately pick an unusual name for this\n  // internal method to avoid clashing with names used in user TESTs.\n  void DeleteSelf_() { delete this; }\n\n  const internal::scoped_ptr< GTEST_FLAG_SAVER_ > gtest_flag_saver_;\n\n  // Often a user misspells SetUp() as Setup() and spends a long time\n  // wondering why it is never called by Google Test.  
The declaration of\n  // the following method is solely for catching such an error at\n  // compile time:\n  //\n  //   - The return type is deliberately chosen to be not void, so it\n  //   will be a conflict if void Setup() is declared in the user's\n  //   test fixture.\n  //\n  //   - This method is private, so it will be another compiler error\n  //   if the method is called from the user's test fixture.\n  //\n  // DO NOT OVERRIDE THIS FUNCTION.\n  //\n  // If you see an error about overriding the following function or\n  // about it being private, you have mis-spelled SetUp() as Setup().\n  struct Setup_should_be_spelled_SetUp {};\n  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }\n\n  // We disallow copying Tests.\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);\n};\n\ntypedef internal::TimeInMillis TimeInMillis;\n\n// A copyable object representing a user specified test property which can be\n// output as a key/value string pair.\n//\n// Don't inherit from TestProperty as its destructor is not virtual.\nclass TestProperty {\n public:\n  // C'tor.  TestProperty does NOT have a default constructor.\n  // Always use this constructor (with parameters) to create a\n  // TestProperty object.\n  TestProperty(const std::string& a_key, const std::string& a_value) :\n    key_(a_key), value_(a_value) {\n  }\n\n  // Gets the user supplied key.\n  const char* key() const {\n    return key_.c_str();\n  }\n\n  // Gets the user supplied value.\n  const char* value() const {\n    return value_.c_str();\n  }\n\n  // Sets a new value, overriding the one supplied in the constructor.\n  void SetValue(const std::string& new_value) {\n    value_ = new_value;\n  }\n\n private:\n  // The key supplied by the user.\n  std::string key_;\n  // The value supplied by the user.\n  std::string value_;\n};\n\n// The result of a single Test.  
This includes a list of\n// TestPartResults, a list of TestProperties, a count of how many\n// death tests there are in the Test, and how much time it took to run\n// the Test.\n//\n// TestResult is not copyable.\nclass GTEST_API_ TestResult {\n public:\n  // Creates an empty TestResult.\n  TestResult();\n\n  // D'tor.  Do not inherit from TestResult.\n  ~TestResult();\n\n  // Gets the number of all test parts.  This is the sum of the number\n  // of successful test parts and the number of failed test parts.\n  int total_part_count() const;\n\n  // Returns the number of the test properties.\n  int test_property_count() const;\n\n  // Returns true iff the test passed (i.e. no test part failed).\n  bool Passed() const { return !Failed(); }\n\n  // Returns true iff the test failed.\n  bool Failed() const;\n\n  // Returns true iff the test fatally failed.\n  bool HasFatalFailure() const;\n\n  // Returns true iff the test has a non-fatal failure.\n  bool HasNonfatalFailure() const;\n\n  // Returns the elapsed time, in milliseconds.\n  TimeInMillis elapsed_time() const { return elapsed_time_; }\n\n  // Returns the i-th test part result among all the results. i can range\n  // from 0 to test_property_count() - 1. If i is not in that range, aborts\n  // the program.\n  const TestPartResult& GetTestPartResult(int i) const;\n\n  // Returns the i-th test property. i can range from 0 to\n  // test_property_count() - 1. 
If i is not in that range, aborts the\n  // program.\n  const TestProperty& GetTestProperty(int i) const;\n\n private:\n  friend class TestInfo;\n  friend class TestCase;\n  friend class UnitTest;\n  friend class internal::DefaultGlobalTestPartResultReporter;\n  friend class internal::ExecDeathTest;\n  friend class internal::TestResultAccessor;\n  friend class internal::UnitTestImpl;\n  friend class internal::WindowsDeathTest;\n\n  // Gets the vector of TestPartResults.\n  const std::vector<TestPartResult>& test_part_results() const {\n    return test_part_results_;\n  }\n\n  // Gets the vector of TestProperties.\n  const std::vector<TestProperty>& test_properties() const {\n    return test_properties_;\n  }\n\n  // Sets the elapsed time.\n  void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }\n\n  // Adds a test property to the list. The property is validated and may add\n  // a non-fatal failure if invalid (e.g., if it conflicts with reserved\n  // key names). If a property is already recorded for the same key, the\n  // value will be updated, rather than storing multiple values for the same\n  // key.  xml_element specifies the element for which the property is being\n  // recorded and is used for validation.\n  void RecordProperty(const std::string& xml_element,\n                      const TestProperty& test_property);\n\n  // Adds a failure if the key is a reserved attribute of Google Test\n  // testcase tags.  
Returns true if the property is valid.\n  // TODO(russr): Validate attribute names are legal and human readable.\n  static bool ValidateTestProperty(const std::string& xml_element,\n                                   const TestProperty& test_property);\n\n  // Adds a test part result to the list.\n  void AddTestPartResult(const TestPartResult& test_part_result);\n\n  // Returns the death test count.\n  int death_test_count() const { return death_test_count_; }\n\n  // Increments the death test count, returning the new count.\n  int increment_death_test_count() { return ++death_test_count_; }\n\n  // Clears the test part results.\n  void ClearTestPartResults();\n\n  // Clears the object.\n  void Clear();\n\n  // Protects mutable state of the property vector and of owned\n  // properties, whose values may be updated.\n  internal::Mutex test_properites_mutex_;\n\n  // The vector of TestPartResults\n  std::vector<TestPartResult> test_part_results_;\n  // The vector of TestProperties\n  std::vector<TestProperty> test_properties_;\n  // Running count of death tests.\n  int death_test_count_;\n  // The elapsed time, in milliseconds.\n  TimeInMillis elapsed_time_;\n\n  // We disallow copying TestResult.\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);\n};  // class TestResult\n\n// A TestInfo object stores the following information about a test:\n//\n//   Test case name\n//   Test name\n//   Whether the test should be run\n//   A function pointer that creates the test object when invoked\n//   Test result\n//\n// The constructor of TestInfo registers itself with the UnitTest\n// singleton such that the RUN_ALL_TESTS() macro knows which tests to\n// run.\nclass GTEST_API_ TestInfo {\n public:\n  // Destructs a TestInfo object.  
This function is not virtual, so\n  // don't inherit from TestInfo.\n  ~TestInfo();\n\n  // Returns the test case name.\n  const char* test_case_name() const { return test_case_name_.c_str(); }\n\n  // Returns the test name.\n  const char* name() const { return name_.c_str(); }\n\n  // Returns the name of the parameter type, or NULL if this is not a typed\n  // or a type-parameterized test.\n  const char* type_param() const {\n    if (type_param_.get() != NULL)\n      return type_param_->c_str();\n    return NULL;\n  }\n\n  // Returns the text representation of the value parameter, or NULL if this\n  // is not a value-parameterized test.\n  const char* value_param() const {\n    if (value_param_.get() != NULL)\n      return value_param_->c_str();\n    return NULL;\n  }\n\n  // Returns the file name where this test is defined.\n  const char* file() const { return location_.file.c_str(); }\n\n  // Returns the line where this test is defined.\n  int line() const { return location_.line; }\n\n  // Returns true if this test should run, that is if the test is not\n  // disabled (or it is disabled but the also_run_disabled_tests flag has\n  // been specified) and its full name matches the user-specified filter.\n  //\n  // Google Test allows the user to filter the tests by their full names.\n  // The full name of a test Bar in test case Foo is defined as\n  // \"Foo.Bar\".  Only the tests that match the filter will run.\n  //\n  // A filter is a colon-separated list of glob (not regex) patterns,\n  // optionally followed by a '-' and a colon-separated list of\n  // negative patterns (tests to exclude).  
A test is run if it\n  // matches one of the positive patterns and does not match any of\n  // the negative patterns.\n  //\n  // For example, *A*:Foo.* is a filter that matches any string that\n  // contains the character 'A' or starts with \"Foo.\".\n  bool should_run() const { return should_run_; }\n\n  // Returns true iff this test will appear in the XML report.\n  bool is_reportable() const {\n    // For now, the XML report includes all tests matching the filter.\n    // In the future, we may trim tests that are excluded because of\n    // sharding.\n    return matches_filter_;\n  }\n\n  // Returns the result of the test.\n  const TestResult* result() const { return &result_; }\n\n private:\n#if GTEST_HAS_DEATH_TEST\n  friend class internal::DefaultDeathTestFactory;\n#endif  // GTEST_HAS_DEATH_TEST\n  friend class Test;\n  friend class TestCase;\n  friend class internal::UnitTestImpl;\n  friend class internal::StreamingListenerTest;\n  friend TestInfo* internal::MakeAndRegisterTestInfo(\n      const char* test_case_name,\n      const char* name,\n      const char* type_param,\n      const char* value_param,\n      internal::CodeLocation code_location,\n      internal::TypeId fixture_class_id,\n      Test::SetUpTestCaseFunc set_up_tc,\n      Test::TearDownTestCaseFunc tear_down_tc,\n      internal::TestFactoryBase* factory);\n\n  // Constructs a TestInfo object. 
The newly constructed instance assumes\n  // ownership of the factory object.\n  TestInfo(const std::string& test_case_name,\n           const std::string& name,\n           const char* a_type_param,   // NULL if not a type-parameterized test\n           const char* a_value_param,  // NULL if not a value-parameterized test\n           internal::CodeLocation a_code_location,\n           internal::TypeId fixture_class_id,\n           internal::TestFactoryBase* factory);\n\n  // Increments the number of death tests encountered in this test so\n  // far.\n  int increment_death_test_count() {\n    return result_.increment_death_test_count();\n  }\n\n  // Creates the test object, runs it, records its result, and then\n  // deletes it.\n  void Run();\n\n  static void ClearTestResult(TestInfo* test_info) {\n    test_info->result_.Clear();\n  }\n\n  // These fields are immutable properties of the test.\n  const std::string test_case_name_;     // Test case name\n  const std::string name_;               // Test name\n  // Name of the parameter type, or NULL if this is not a typed or a\n  // type-parameterized test.\n  const internal::scoped_ptr<const ::std::string> type_param_;\n  // Text representation of the value parameter, or NULL if this is not a\n  // value-parameterized test.\n  const internal::scoped_ptr<const ::std::string> value_param_;\n  internal::CodeLocation location_;\n  const internal::TypeId fixture_class_id_;   // ID of the test fixture class\n  bool should_run_;                 // True iff this test should run\n  bool is_disabled_;                // True iff this test is disabled\n  bool matches_filter_;             // True if this test matches the\n                                    // user-specified filter.\n  internal::TestFactoryBase* const factory_;  // The factory that creates\n                                              // the test object\n\n  // This field is mutable and needs to be reset before running the\n  // test for the second time.\n  
TestResult result_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
};

// A test case, which consists of a vector of TestInfos.
//
// TestCase is not copyable.
class GTEST_API_ TestCase {
 public:
  // Creates a TestCase with the given name.
  //
  // TestCase does NOT have a default constructor.  Always use this
  // constructor to create a TestCase object.
  //
  // Arguments:
  //
  //   name:         name of the test case
  //   a_type_param: the name of the test's type parameter, or NULL if
  //                 this is not a type-parameterized test.
  //   set_up_tc:    pointer to the function that sets up the test case
  //   tear_down_tc: pointer to the function that tears down the test case
  TestCase(const char* name, const char* a_type_param,
           Test::SetUpTestCaseFunc set_up_tc,
           Test::TearDownTestCaseFunc tear_down_tc);

  // Destructor of TestCase.
  virtual ~TestCase();

  // Gets the name of the TestCase.
  const char* name() const { return name_.c_str(); }

  // Returns the name of the parameter type, or NULL if this is not a
  // type-parameterized test case.
  const char* type_param() const {
    if (type_param_.get() != NULL)
      return type_param_->c_str();
    return NULL;
  }

  // Returns true if any test in this test case should run.
  bool should_run() const { return should_run_; }

  // Gets the number of successful tests in this test case.
  int successful_test_count() const;

  // Gets the number of failed tests in this test case.
  int failed_test_count() const;

  // Gets the number of disabled tests that will be reported in the XML report.
  int reportable_disabled_test_count() const;

  // Gets the number of disabled tests in this test case.
  int disabled_test_count() const;

  // Gets the number of tests to be printed in the XML report.
  int reportable_test_count() const;

  // Get the number of tests in this test case that should run.
  int test_to_run_count() const;

  // Gets the number of all tests in this test case.
  int total_test_count() const;

  // Returns true iff the test case passed.
  bool Passed() const { return !Failed(); }

  // Returns true iff the test case failed.
  bool Failed() const { return failed_test_count() > 0; }

  // Returns the elapsed time, in milliseconds.
  TimeInMillis elapsed_time() const { return elapsed_time_; }

  // Returns the i-th test among all the tests. i can range from 0 to
  // total_test_count() - 1. If i is not in that range, returns NULL.
  const TestInfo* GetTestInfo(int i) const;

  // Returns the TestResult that holds test properties recorded during
  // execution of SetUpTestCase and TearDownTestCase.
  const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }

 private:
  friend class Test;
  friend class internal::UnitTestImpl;

  // Gets the (mutable) vector of TestInfos in this TestCase.
  std::vector<TestInfo*>& test_info_list() { return test_info_list_; }

  // Gets the (immutable) vector of TestInfos in this TestCase.
  const std::vector<TestInfo*>& test_info_list() const {
    return test_info_list_;
  }

  // Returns the i-th test among all the tests. i can range from 0 to
  // total_test_count() - 1. If i is not in that range, returns NULL.
  TestInfo* GetMutableTestInfo(int i);

  // Sets the should_run member.
  void set_should_run(bool should) { should_run_ = should; }

  // Adds a TestInfo to this test case.  Will delete the TestInfo upon
  // destruction of the TestCase object.
  void AddTestInfo(TestInfo * test_info);

  // Clears the results of all tests in this test case.
  void ClearResult();

  // Clears the results of all tests in the given test case.
  static void ClearTestCaseResult(TestCase* test_case) {
    test_case->ClearResult();
  }

  // Runs every test in this TestCase.
  void Run();

  // Runs SetUpTestCase() for this TestCase.  This wrapper is needed
  // for catching exceptions thrown from SetUpTestCase().
  void RunSetUpTestCase() { (*set_up_tc_)(); }

  // Runs TearDownTestCase() for this TestCase.  This wrapper is
  // needed for catching exceptions thrown from TearDownTestCase().
  void RunTearDownTestCase() { (*tear_down_tc_)(); }

  // Returns true iff test passed.
  static bool TestPassed(const TestInfo* test_info) {
    return test_info->should_run() && test_info->result()->Passed();
  }

  // Returns true iff test failed.
  static bool TestFailed(const TestInfo* test_info) {
    return test_info->should_run() && test_info->result()->Failed();
  }

  // Returns true iff the test is disabled and will be reported in the XML
  // report.
  static bool TestReportableDisabled(const TestInfo* test_info) {
    return test_info->is_reportable() && test_info->is_disabled_;
  }

  // Returns true iff test is disabled.
  static bool TestDisabled(const TestInfo* test_info) {
    return test_info->is_disabled_;
  }

  // Returns true iff this test will appear in the XML report.
  static bool TestReportable(const TestInfo* test_info) {
    return test_info->is_reportable();
  }

  // Returns true if the given test should run.
  static bool ShouldRunTest(const TestInfo* test_info) {
    return test_info->should_run();
  }

  // Shuffles the tests in this test case.
  void ShuffleTests(internal::Random* random);

  // Restores the test order to before the first shuffle.
  void UnshuffleTests();

  // Name of the test case.
  std::string name_;
  // Name of the parameter type, or NULL if this is not a typed or a
  // type-parameterized test.
  const internal::scoped_ptr<const ::std::string> type_param_;
  // The vector of TestInfos in their original order.  It owns the
  // elements in the vector.
  std::vector<TestInfo*> test_info_list_;
  // Provides a level of indirection for the test list to allow easy
  // shuffling and restoring the test order.  The i-th element in this
  // vector is the index of the i-th test in the shuffled test list.
  std::vector<int> test_indices_;
  // Pointer to the function that sets up the test case.
  Test::SetUpTestCaseFunc set_up_tc_;
  // Pointer to the function that tears down the test case.
  Test::TearDownTestCaseFunc tear_down_tc_;
  // True iff any test in this test case should run.
  bool should_run_;
  // Elapsed time, in milliseconds.
  TimeInMillis elapsed_time_;
  // Holds test properties recorded during execution of SetUpTestCase and
  // TearDownTestCase.
  TestResult ad_hoc_test_result_;

  // We disallow copying TestCases.
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
};

// An Environment object is capable of setting up and tearing down an
// environment.  You should subclass this to define your own
// environment(s).
//
// An Environment object does the set-up and tear-down in virtual
// methods SetUp() and TearDown() instead of the constructor and the
// destructor, as:
//
//   1. You cannot safely throw from a destructor.  This is a problem
//      as in some cases Google Test is used where exceptions are enabled, and
//      we may want to implement ASSERT_* using exceptions where they are
//      available.
//   2. You cannot use ASSERT_* directly in a constructor or
//      destructor.
class Environment {
 public:
  // The d'tor is virtual as we need to subclass Environment.
  virtual ~Environment() {}

  // Override this to define how to set up the environment.
  virtual void SetUp() {}

  // Override this to define how to tear down the environment.
  virtual void TearDown() {}
 private:
  // If you see an error about overriding the following function or
  // about it being private, you have mis-spelled SetUp() as Setup().
  struct Setup_should_be_spelled_SetUp {};
  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
};

// The interface for tracing execution of tests. The methods are organized in
// the order the corresponding events are fired.
class TestEventListener {
 public:
  virtual ~TestEventListener() {}

  // Fired before any test activity starts.
  virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;

  // Fired before each iteration of tests starts.  There may be more than
  // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
  // index, starting from 0.
  virtual void OnTestIterationStart(const UnitTest& unit_test,
                                    int iteration) = 0;

  // Fired before environment set-up for each iteration of tests starts.
  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;

  // Fired after environment set-up for each iteration of tests ends.
  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;

  // Fired before the test case starts.
  virtual void OnTestCaseStart(const TestCase& test_case) = 0;

  // Fired before the test starts.
  virtual void OnTestStart(const TestInfo& test_info) = 0;

  // Fired after a failed assertion or a SUCCEED() invocation.
  virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;

  // Fired after the test ends.
  virtual void OnTestEnd(const TestInfo& test_info) = 0;

  // Fired after the test case ends.
  virtual void OnTestCaseEnd(const TestCase& test_case) = 0;

  // Fired before environment tear-down for each iteration of tests starts.
  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;

  // Fired after environment tear-down for each iteration of tests ends.
  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;

  // Fired after each iteration of tests finishes.
  virtual void OnTestIterationEnd(const UnitTest& unit_test,
                                  int iteration) = 0;

  // Fired after all test activities have ended.
  virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
};

// The convenience class for users who need to override just one or two
// methods and are not concerned that a possible change to a signature of
// the methods they override will not be caught during the build.  For
// comments about each method please see the definition of TestEventListener
// above.
class EmptyTestEventListener : public TestEventListener {
 public:
  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
  virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
                                    int /*iteration*/) {}
  virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
  virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
  virtual void OnTestStart(const TestInfo& /*test_info*/) {}
  virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
  virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
  virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
  virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
  virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
                                  int /*iteration*/) {}
  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
};

// TestEventListeners lets users add listeners to track events in Google Test.
class GTEST_API_ TestEventListeners {
 public:
  TestEventListeners();
  ~TestEventListeners();

  // Appends an event listener to the end of the list. Google Test assumes
  // the ownership of the listener (i.e. it will delete the listener when
  // the test program finishes).
  void Append(TestEventListener* listener);

  // Removes the given event listener from the list and returns it.  It then
  // becomes the caller's responsibility to delete the listener. Returns
  // NULL if the listener is not found in the list.
  TestEventListener* Release(TestEventListener* listener);

  // Returns the standard listener responsible for the default console
  // output.  Can be removed from the listeners list to shut down default
  // console output.  Note that removing this object from the listener list
  // with Release transfers its ownership to the caller and makes this
  // function return NULL the next time.
  TestEventListener* default_result_printer() const {
    return default_result_printer_;
  }

  // Returns the standard listener responsible for the default XML output
  // controlled by the --gtest_output=xml flag.  Can be removed from the
  // listeners list by users who want to shut down the default XML output
  // controlled by this flag and substitute it with a custom one.  Note that
  // removing this object from the listener list with Release transfers its
  // ownership to the caller and makes this function return NULL the next
  // time.
  TestEventListener* default_xml_generator() const {
    return default_xml_generator_;
  }

 private:
  friend class TestCase;
  friend class TestInfo;
  friend class internal::DefaultGlobalTestPartResultReporter;
  friend class internal::NoExecDeathTest;
  friend class internal::TestEventListenersAccessor;
  friend class internal::UnitTestImpl;

  // Returns repeater that broadcasts the TestEventListener events to all
  // subscribers.
  TestEventListener* repeater();

  // Sets the default_result_printer attribute to the provided listener.
  // The listener is also added to the listener list and previous
  // default_result_printer is removed from it and deleted. The listener can
  // also be NULL in which case it will not be added to the list. Does
  // nothing if the previous and the current listener objects are the same.
  void SetDefaultResultPrinter(TestEventListener* listener);

  // Sets the default_xml_generator attribute to the provided listener.  The
  // listener is also added to the listener list and previous
  // default_xml_generator is removed from it and deleted. The listener can
  // also be NULL in which case it will not be added to the list. Does
  // nothing if the previous and the current listener objects are the same.
  void SetDefaultXmlGenerator(TestEventListener* listener);

  // Controls whether events will be forwarded by the repeater to the
  // listeners in the list.
  bool EventForwardingEnabled() const;
  void SuppressEventForwarding();

  // The actual list of listeners.
  internal::TestEventRepeater* repeater_;
  // Listener responsible for the standard result output.
  TestEventListener* default_result_printer_;
  // Listener responsible for the creation of the XML output file.
  TestEventListener* default_xml_generator_;

  // We disallow copying TestEventListeners.
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
};

// A UnitTest consists of a vector of TestCases.
//
// This is a singleton class.  The only instance of UnitTest is
// created when UnitTest::GetInstance() is first called.  This
// instance is never deleted.
//
// UnitTest is not copyable.
//
// This class is thread-safe as long as the methods are called
// according to their specification.
class GTEST_API_ UnitTest {
 public:
  // Gets the singleton UnitTest object.  The first time this method
  // is called, a UnitTest object is constructed and returned.
  // Consecutive calls will return the same object.
  static UnitTest* GetInstance();

  // Runs all tests in this UnitTest object and prints the result.
  // Returns 0 if successful, or 1 otherwise.
  //
  // This method can only be called from the main thread.
  //
  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
  int Run() GTEST_MUST_USE_RESULT_;

  // Returns the working directory when the first TEST() or TEST_F()
  // was executed.  The UnitTest object owns the string.
  const char* original_working_dir() const;

  // Returns the TestCase object for the test that's currently running,
  // or NULL if no test is running.
  const TestCase* current_test_case() const
      GTEST_LOCK_EXCLUDED_(mutex_);

  // Returns the TestInfo object for the test that's currently running,
  // or NULL if no test is running.
  const TestInfo* current_test_info() const
      GTEST_LOCK_EXCLUDED_(mutex_);

  // Returns the random seed used at the start of the current test run.
  int random_seed() const;

#if GTEST_HAS_PARAM_TEST
  // Returns the ParameterizedTestCaseRegistry object used to keep track of
  // value-parameterized tests and instantiate and register them.
  //
  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
  internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
      GTEST_LOCK_EXCLUDED_(mutex_);
#endif  // GTEST_HAS_PARAM_TEST

  // Gets the number of successful test cases.
  int successful_test_case_count() const;

  // Gets the number of failed test cases.
  int failed_test_case_count() const;

  // Gets the number of all test cases.
  int total_test_case_count() const;

  // Gets the number of all test cases that contain at least one test
  // that should run.
  int test_case_to_run_count() const;

  // Gets the number of successful tests.
  int successful_test_count() const;

  // Gets the number of failed tests.
  int failed_test_count() const;

  // Gets the number of disabled tests that will be reported in the XML report.
  int reportable_disabled_test_count() const;

  // Gets the number of disabled tests.
  int disabled_test_count() const;

  // Gets the number of tests to be printed in the XML report.
  int reportable_test_count() const;

  // Gets the number of all tests.
  int total_test_count() const;

  // Gets the number of tests that should run.
  int test_to_run_count() const;

  // Gets the time of the test program start, in ms from the start of the
  // UNIX epoch.
  TimeInMillis start_timestamp() const;

  // Gets the elapsed time, in milliseconds.
  TimeInMillis elapsed_time() const;

  // Returns true iff the unit test passed (i.e. all test cases passed).
  bool Passed() const;

  // Returns true iff the unit test failed (i.e. some test case failed
  // or something outside of all tests failed).
  bool Failed() const;

  // Gets the i-th test case among all the test cases. i can range from 0 to
  // total_test_case_count() - 1. If i is not in that range, returns NULL.
  const TestCase* GetTestCase(int i) const;

  // Returns the TestResult containing information on test failures and
  // properties logged outside of individual test cases.
  const TestResult& ad_hoc_test_result() const;

  // Returns the list of event listeners that can be used to track events
  // inside Google Test.
  TestEventListeners& listeners();

 private:
  // Registers and returns a global test environment.  When a test
  // program is run, all global test environments will be set-up in
  // the order they were registered.  After all tests in the program
  // have finished, all global test environments will be torn-down in
  // the *reverse* order they were registered.
  //
  // The UnitTest object takes ownership of the given environment.
  //
  // This method can only be called from the main thread.
  Environment* AddEnvironment(Environment* env);

  // Adds a TestPartResult to the current TestResult object.  All
  // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
  // eventually call this to report their results.  The user code
  // should use the assertion macros instead of calling this directly.
  void AddTestPartResult(TestPartResult::Type result_type,
                         const char* file_name,
                         int line_number,
                         const std::string& message,
                         const std::string& os_stack_trace)
      GTEST_LOCK_EXCLUDED_(mutex_);

  // Adds a TestProperty to the current TestResult object when invoked from
  // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
  // from SetUpTestCase or TearDownTestCase, or to the global property set
  // when invoked elsewhere.  If the result already contains a property with
  // the same key, the value will be updated.
  void RecordProperty(const std::string& key, const std::string& value);

  // Gets the i-th test case among all the test cases. i can range from 0 to
  // total_test_case_count() - 1. If i is not in that range, returns NULL.
  TestCase* GetMutableTestCase(int i);

  // Accessors for the implementation object.
  internal::UnitTestImpl* impl() { return impl_; }
  const internal::UnitTestImpl* impl() const { return impl_; }

  // These classes and functions are friends as they need to access private
  // members of UnitTest.
  friend class Test;
  friend class internal::AssertHelper;
  friend class internal::ScopedTrace;
  friend class internal::StreamingListenerTest;
  friend class internal::UnitTestRecordPropertyTestHelper;
  friend Environment* AddGlobalTestEnvironment(Environment* env);
  friend internal::UnitTestImpl* internal::GetUnitTestImpl();
  friend void internal::ReportFailureInUnknownLocation(
      TestPartResult::Type result_type,
      const std::string& message);

  // Creates an empty UnitTest.
  UnitTest();

  // D'tor
  virtual ~UnitTest();

  // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
  // Google Test trace stack.
  void PushGTestTrace(const internal::TraceInfo& trace)
      GTEST_LOCK_EXCLUDED_(mutex_);

  // Pops a trace from the per-thread Google Test trace stack.
  void PopGTestTrace()
      GTEST_LOCK_EXCLUDED_(mutex_);

  // Protects mutable state in *impl_.  This is mutable as some const
  // methods need to lock it too.
  mutable internal::Mutex mutex_;

  // Opaque implementation object.  This field is never changed once
  // the object is constructed.  We don't mark it as const here, as
  // doing so will cause a warning in the constructor of UnitTest.
  // Mutable state in *impl_ is protected by mutex_.
  internal::UnitTestImpl* impl_;

  // We disallow copying UnitTest.
  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
};

// A convenient wrapper for adding an environment for the test
// program.
//
// You should call this before RUN_ALL_TESTS() is called, probably in
// main().  If you use gtest_main, you need to call this before main()
// starts for it to take effect.  For example, you can define a global
// variable like this:
//
//   testing::Environment* const foo_env =
//       testing::AddGlobalTestEnvironment(new FooEnvironment);
//
// However, we strongly recommend you to write your own main() and
// call AddGlobalTestEnvironment() there, as relying on initialization
// of global variables makes the code harder to read and may cause
// problems when you register multiple environments from different
// translation units and the environments have dependencies among them
// (remember that the compiler doesn't guarantee the order in which
// global variables from different translation units are initialized).
inline Environment* AddGlobalTestEnvironment(Environment* env) {
  return UnitTest::GetInstance()->AddEnvironment(env);
}

// Initializes Google Test.  This must be called before calling
// RUN_ALL_TESTS().  In particular, it parses a command line for the
// flags that Google Test recognizes.  Whenever a Google Test flag is
// seen, it is removed from argv, and *argc is decremented.
//
// No value is returned.  Instead, the Google Test flag variables are
// updated.
//
// Calling the function for the second time has no user-visible effect.
GTEST_API_ void InitGoogleTest(int* argc, char** argv);

// This overloaded version can be used in Windows programs compiled in
// UNICODE mode.
GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);

namespace internal {

// Separate the error generating code from the code path to reduce the stack
// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
// when calling EXPECT_* in a tight loop.
template <typename T1, typename T2>
AssertionResult CmpHelperEQFailure(const char* expected_expression,
                                   const char* actual_expression,
                                   const T1& expected, const T2& actual) {
  return EqFailure(expected_expression,
                   actual_expression,
                   FormatForComparisonFailureMessage(expected, actual),
                   FormatForComparisonFailureMessage(actual, expected),
                   false);
}

// The helper function for {ASSERT|EXPECT}_EQ.
template <typename T1, typename T2>
AssertionResult CmpHelperEQ(const char* expected_expression,
                            const char* actual_expression,
                            const T1& expected,
                            const T2& actual) {
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4389 /* signed/unsigned mismatch */)
  if (expected == actual) {
    return AssertionSuccess();
  }
GTEST_DISABLE_MSC_WARNINGS_POP_()

  return CmpHelperEQFailure(expected_expression, actual_expression, expected,
                            actual);
}

// With this overloaded version, we allow anonymous enums to be used
// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
// can be implicitly cast to BiggestInt.
GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
                                       const char* actual_expression,
                                       BiggestInt expected,
                                       BiggestInt actual);

// The helper class for {ASSERT|EXPECT}_EQ.  The template argument
// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
// is a null pointer literal.  The following default implementation is
// for lhs_is_null_literal being false.
template <bool lhs_is_null_literal>
class EqHelper {
 public:
  // This templatized version is for the general case.
  template <typename T1, typename T2>
  static AssertionResult Compare(const char* expected_expression,
                                 const char* actual_expression,
                                 const T1& expected,
                                 const T2& actual) {
    return CmpHelperEQ(expected_expression, actual_expression, expected,
                       actual);
  }

  // With this overloaded version, we allow anonymous enums to be used
  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
  // enums can be implicitly cast to BiggestInt.
  //
  // Even though its body looks the same as the above version, we
  // cannot merge the two, as it will make anonymous enums unhappy.
  static AssertionResult Compare(const char* expected_expression,
                                 const char* actual_expression,
                                 BiggestInt expected,
                                 BiggestInt actual) {
    return CmpHelperEQ(expected_expression, actual_expression, expected,
                       actual);
  }
};

// This specialization is used when the first argument to ASSERT_EQ()
// is a null pointer literal, like NULL, false, or 0.
template <>
class EqHelper<true> {
 public:
  // We define two overloaded versions of Compare().  The first
  // version will be picked when the second argument to ASSERT_EQ() is
  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
  // EXPECT_EQ(false, a_bool).
  template <typename T1, typename T2>
  static AssertionResult Compare(
      const char* expected_expression,
      const char* actual_expression,
      const T1& expected,
      const T2& actual,
      // The following line prevents this overload from being considered if T2
      // is not a pointer type.  We need this because ASSERT_EQ(NULL, my_ptr)
      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
      // to match the Secret* in the other overload, which would otherwise make
      // this template match better.
      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
    return CmpHelperEQ(expected_expression, actual_expression, expected,
                       actual);
  }

  // This version will be picked when the second argument to ASSERT_EQ() is a
  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
  template <typename T>
  static AssertionResult Compare(
      const char* expected_expression,
      const char* actual_expression,
      // We used to have a second template parameter instead of Secret*.  That
      // template parameter would deduce to 'long', making this a better match
      // than the first overload even without the first overload's EnableIf.
      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
      // non-pointer argument" (even a deduced integral argument), so the old
      // implementation caused warnings in user code.
      Secret* /* expected (NULL) */,
      T* actual) {
    // We already know that 'expected' is a null pointer.
    return CmpHelperEQ(expected_expression, actual_expression,
                       static_cast<T*>(NULL), actual);
  }
};

// Separate the error generating code from the code path to reduce the stack
// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
// when calling EXPECT_OP in a tight loop.
template <typename T1, typename T2>
AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
                                   const T1& val1, const T2& val2,
                                   const char* op) {
  return AssertionFailure()
         << "Expected: (" << expr1 << ") " << op << " (" << expr2
         << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
         << " vs " << FormatForComparisonFailureMessage(val2, val1);
}

// A macro for implementing the helper functions needed to implement
// ASSERT_?? and EXPECT_??.  It is here just to avoid copy-and-paste
// of similar code.
//
// For each templatized helper function, we also define an overloaded
// version for BiggestInt in order to reduce code bloat and allow
// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
// with gcc 4.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.

#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
template <typename T1, typename T2>\
AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
                                   const T1& val1, const T2& val2) {\
  if (val1 op val2) {\
    return AssertionSuccess();\
  } else {\
    return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
  }\
}\
GTEST_API_ AssertionResult CmpHelper##op_name(\
    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)

// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.

// Implements the helper function for {ASSERT|EXPECT}_NE
GTEST_IMPL_CMP_HELPER_(NE, !=);
// Implements the helper function for {ASSERT|EXPECT}_LE
GTEST_IMPL_CMP_HELPER_(LE, <=);
// Implements the helper function for {ASSERT|EXPECT}_LT
GTEST_IMPL_CMP_HELPER_(LT, <);
// Implements the helper function for {ASSERT|EXPECT}_GE
GTEST_IMPL_CMP_HELPER_(GE, >=);
// Implements the helper function for {ASSERT|EXPECT}_GT
GTEST_IMPL_CMP_HELPER_(GT, >);

#undef GTEST_IMPL_CMP_HELPER_

// The helper function for {ASSERT|EXPECT}_STREQ.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
                                          const char* actual_expression,
                                          const char* expected,
                                          const char* actual);

// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
                                              const char* actual_expression,
                                              const char* expected,
                                              const char* actual);

// The helper function for {ASSERT|EXPECT}_STRNE.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
                                          const char* s2_expression,
                                          const char* s1,
                                          const char* s2);

// The helper function for {ASSERT|EXPECT}_STRCASENE.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
                                              const char* s2_expression,
                                              const char* s1,
                                              const char* s2);


// Helper function for *_STREQ on wide strings.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
                                          const char* actual_expression,
                                          const wchar_t* expected,
                                          const wchar_t* actual);

// Helper function for *_STRNE on wide strings.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
                                          const char* s2_expression,
                                          const wchar_t* s1,
                                          const wchar_t* s2);

}  // namespace internal

// IsSubstring() and IsNotSubstring() are intended to be used as the
// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
// themselves.  They check whether needle is a substring of haystack
// (NULL is considered a substring of itself only), and return an
// appropriate error message when they fail.
//
// The {needle,haystack}_expr arguments are the stringified
// expressions that generated the two real arguments.
GTEST_API_ AssertionResult IsSubstring(
    const char* needle_expr, const char* haystack_expr,
    const char* needle, const char* haystack);
GTEST_API_ AssertionResult IsSubstring(
    const char* needle_expr, const char* haystack_expr,
    const wchar_t* needle, const wchar_t* haystack);
GTEST_API_ AssertionResult IsNotSubstring(
    const char* needle_expr, const char* haystack_expr,
    const char* needle, const char* haystack);
GTEST_API_ AssertionResult IsNotSubstring(
    const char* needle_expr, const char* haystack_expr,
    const wchar_t* needle, const wchar_t* haystack);
GTEST_API_ AssertionResult IsSubstring(
    const char* needle_expr, const char* haystack_expr,
    const ::std::string& needle, const ::std::string& haystack);
GTEST_API_ AssertionResult IsNotSubstring(
    const char* needle_expr, const char* haystack_expr,
    const ::std::string& needle, const ::std::string& haystack);

#if GTEST_HAS_STD_WSTRING
GTEST_API_ AssertionResult IsSubstring(
    const char* needle_expr, const char* haystack_expr,
   
 const ::std::wstring& needle, const ::std::wstring& haystack);\nGTEST_API_ AssertionResult IsNotSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const ::std::wstring& needle, const ::std::wstring& haystack);\n#endif  // GTEST_HAS_STD_WSTRING\n\nnamespace internal {\n\n// Helper template function for comparing floating-points.\n//\n// Template parameter:\n//\n//   RawType: the raw floating-point type (either float or double)\n//\n// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\ntemplate <typename RawType>\nAssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,\n                                         const char* actual_expression,\n                                         RawType expected,\n                                         RawType actual) {\n  const FloatingPoint<RawType> lhs(expected), rhs(actual);\n\n  if (lhs.AlmostEquals(rhs)) {\n    return AssertionSuccess();\n  }\n\n  ::std::stringstream expected_ss;\n  expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)\n              << expected;\n\n  ::std::stringstream actual_ss;\n  actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)\n            << actual;\n\n  return EqFailure(expected_expression,\n                   actual_expression,\n                   StringStreamToString(&expected_ss),\n                   StringStreamToString(&actual_ss),\n                   false);\n}\n\n// Helper function for implementing ASSERT_NEAR.\n//\n// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.\nGTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,\n                                                const char* expr2,\n                                                const char* abs_error_expr,\n                                                double val1,\n                                                double val2,\n                                                double abs_error);\n\n// INTERNAL 
IMPLEMENTATION - DO NOT USE IN USER CODE.\n// A class that enables one to stream messages to assertion macros\nclass GTEST_API_ AssertHelper {\n public:\n  // Constructor.\n  AssertHelper(TestPartResult::Type type,\n               const char* file,\n               int line,\n               const char* message);\n  ~AssertHelper();\n\n  // Message assignment is a semantic trick to enable assertion\n  // streaming; see the GTEST_MESSAGE_ macro below.\n  void operator=(const Message& message) const;\n\n private:\n  // We put our data in a struct so that the size of the AssertHelper class can\n  // be as small as possible.  This is important because gcc is incapable of\n  // re-using stack space even for temporary variables, so every EXPECT_EQ\n  // reserves stack space for another AssertHelper.\n  struct AssertHelperData {\n    AssertHelperData(TestPartResult::Type t,\n                     const char* srcfile,\n                     int line_num,\n                     const char* msg)\n        : type(t), file(srcfile), line(line_num), message(msg) { }\n\n    TestPartResult::Type const type;\n    const char* const file;\n    int const line;\n    std::string const message;\n\n   private:\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);\n  };\n\n  AssertHelperData* const data_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);\n};\n\n}  // namespace internal\n\n#if GTEST_HAS_PARAM_TEST\n// The pure interface class that all value-parameterized tests inherit from.\n// A value-parameterized class must inherit from both ::testing::Test and\n// ::testing::WithParamInterface. 
In most cases that just means inheriting\n// from ::testing::TestWithParam, but more complicated test hierarchies\n// may need to inherit from Test and WithParamInterface at different levels.\n//\n// This interface has support for accessing the test parameter value via\n// the GetParam() method.\n//\n// Use it with one of the parameter generator defining functions, like Range(),\n// Values(), ValuesIn(), Bool(), and Combine().\n//\n// class FooTest : public ::testing::TestWithParam<int> {\n//  protected:\n//   FooTest() {\n//     // Can use GetParam() here.\n//   }\n//   virtual ~FooTest() {\n//     // Can use GetParam() here.\n//   }\n//   virtual void SetUp() {\n//     // Can use GetParam() here.\n//   }\n//   virtual void TearDown {\n//     // Can use GetParam() here.\n//   }\n// };\n// TEST_P(FooTest, DoesBar) {\n//   // Can use GetParam() method here.\n//   Foo foo;\n//   ASSERT_TRUE(foo.DoesBar(GetParam()));\n// }\n// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));\n\ntemplate <typename T>\nclass WithParamInterface {\n public:\n  typedef T ParamType;\n  virtual ~WithParamInterface() {}\n\n  // The current parameter value. Is also available in the test fixture's\n  // constructor. This member function is non-static, even though it only\n  // references static data, to reduce the opportunity for incorrect uses\n  // like writing 'WithParamInterface<bool>::GetParam()' for a test that\n  // uses a fixture whose parameter type is int.\n  const ParamType& GetParam() const {\n    GTEST_CHECK_(parameter_ != NULL)\n        << \"GetParam() can only be called inside a value-parameterized test \"\n        << \"-- did you intend to write TEST_P instead of TEST_F?\";\n    return *parameter_;\n  }\n\n private:\n  // Sets parameter value. 
The caller is responsible for making sure the value\n  // remains alive and unchanged throughout the current test.\n  static void SetParam(const ParamType* parameter) {\n    parameter_ = parameter;\n  }\n\n  // Static value used for accessing parameter during a test lifetime.\n  static const ParamType* parameter_;\n\n  // TestClass must be a subclass of WithParamInterface<T> and Test.\n  template <class TestClass> friend class internal::ParameterizedTestFactory;\n};\n\ntemplate <typename T>\nconst T* WithParamInterface<T>::parameter_ = NULL;\n\n// Most value-parameterized classes can ignore the existence of\n// WithParamInterface, and can just inherit from ::testing::TestWithParam.\n\ntemplate <typename T>\nclass TestWithParam : public Test, public WithParamInterface<T> {\n};\n\n#endif  // GTEST_HAS_PARAM_TEST\n\n// Macros for indicating success/failure in test code.\n\n// ADD_FAILURE unconditionally adds a failure to the current test.\n// SUCCEED generates a success - it doesn't automatically make the\n// current test successful, as a test is only successful when it has\n// no failure.\n//\n// EXPECT_* verifies that a certain condition is satisfied.  If not,\n// it behaves like ADD_FAILURE.  In particular:\n//\n//   EXPECT_TRUE  verifies that a Boolean condition is true.\n//   EXPECT_FALSE verifies that a Boolean condition is false.\n//\n// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except\n// that they will also abort the current function on failure.  
People\n// usually want the fail-fast behavior of FAIL and ASSERT_*, but those\n// writing data-driven tests often find themselves using ADD_FAILURE\n// and EXPECT_* more.\n\n// Generates a nonfatal failure with a generic message.\n#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_(\"Failed\")\n\n// Generates a nonfatal failure at the given source file location with\n// a generic message.\n#define ADD_FAILURE_AT(file, line) \\\n  GTEST_MESSAGE_AT_(file, line, \"Failed\", \\\n                    ::testing::TestPartResult::kNonFatalFailure)\n\n// Generates a fatal failure with a generic message.\n#define GTEST_FAIL() GTEST_FATAL_FAILURE_(\"Failed\")\n\n// Define this macro to 1 to omit the definition of FAIL(), which is a\n// generic name and clashes with some other libraries.\n#if !GTEST_DONT_DEFINE_FAIL\n# define FAIL() GTEST_FAIL()\n#endif\n\n// Generates a success with a generic message.\n#define GTEST_SUCCEED() GTEST_SUCCESS_(\"Succeeded\")\n\n// Define this macro to 1 to omit the definition of SUCCEED(), which\n// is a generic name and clashes with some other libraries.\n#if !GTEST_DONT_DEFINE_SUCCEED\n# define SUCCEED() GTEST_SUCCEED()\n#endif\n\n// Macros for testing exceptions.\n//\n//    * {ASSERT|EXPECT}_THROW(statement, expected_exception):\n//         Tests that the statement throws the expected exception.\n//    * {ASSERT|EXPECT}_NO_THROW(statement):\n//         Tests that the statement doesn't throw any exception.\n//    * {ASSERT|EXPECT}_ANY_THROW(statement):\n//         Tests that the statement throws an exception.\n\n#define EXPECT_THROW(statement, expected_exception) \\\n  GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_NO_THROW(statement) \\\n  GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_ANY_THROW(statement) \\\n  GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_THROW(statement, expected_exception) \\\n  GTEST_TEST_THROW_(statement, expected_exception, 
GTEST_FATAL_FAILURE_)\n#define ASSERT_NO_THROW(statement) \\\n  GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)\n#define ASSERT_ANY_THROW(statement) \\\n  GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)\n\n// Boolean assertions. Condition can be either a Boolean expression or an\n// AssertionResult. For more information on how to use AssertionResult with\n// these macros see comments on that class.\n#define EXPECT_TRUE(condition) \\\n  GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \\\n                      GTEST_NONFATAL_FAILURE_)\n#define EXPECT_FALSE(condition) \\\n  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \\\n                      GTEST_NONFATAL_FAILURE_)\n#define ASSERT_TRUE(condition) \\\n  GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \\\n                      GTEST_FATAL_FAILURE_)\n#define ASSERT_FALSE(condition) \\\n  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \\\n                      GTEST_FATAL_FAILURE_)\n\n// Includes the auto-generated header that implements a family of\n// generic predicate assertion macros.\n// Copyright 2006, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command\n// 'gen_gtest_pred_impl.py 5'.  DO NOT EDIT BY HAND!\n//\n// Implements a family of generic predicate assertion macros.\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_\n#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_\n\n// Makes sure this header is not included before gtest.h.\n#ifndef GTEST_INCLUDE_GTEST_GTEST_H_\n# error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.\n#endif  // GTEST_INCLUDE_GTEST_GTEST_H_\n\n// This header implements a family of generic predicate assertion\n// macros:\n//\n//   ASSERT_PRED_FORMAT1(pred_format, v1)\n//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)\n//   ...\n//\n// where pred_format is a function or functor that takes n (in the\n// case of ASSERT_PRED_FORMATn) values and their source expression\n// text, and returns a testing::AssertionResult.  
See the definition\n// of ASSERT_EQ in gtest.h for an example.\n//\n// If you don't care about formatting, you can use the more\n// restrictive version:\n//\n//   ASSERT_PRED1(pred, v1)\n//   ASSERT_PRED2(pred, v1, v2)\n//   ...\n//\n// where pred is an n-ary function or functor that returns bool,\n// and the values v1, v2, ..., must support the << operator for\n// streaming to std::ostream.\n//\n// We also define the EXPECT_* variations.\n//\n// For now we only support predicates whose arity is at most 5.\n// Please email googletestframework@googlegroups.com if you need\n// support for higher arities.\n\n// GTEST_ASSERT_ is the basic statement to which all of the assertions\n// in this file reduce.  Don't use this in your code.\n\n#define GTEST_ASSERT_(expression, on_failure) \\\n  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\\n  if (const ::testing::AssertionResult gtest_ar = (expression)) \\\n    ; \\\n  else \\\n    on_failure(gtest_ar.failure_message())\n\n\n// Helper function for implementing {EXPECT|ASSERT}_PRED1.  Don't use\n// this in your code.\ntemplate <typename Pred,\n          typename T1>\nAssertionResult AssertPred1Helper(const char* pred_text,\n                                  const char* e1,\n                                  Pred pred,\n                                  const T1& v1) {\n  if (pred(v1)) return AssertionSuccess();\n\n  return AssertionFailure() << pred_text << \"(\"\n                            << e1 << \") evaluates to false, where\"\n                            << \"\\n\" << e1 << \" evaluates to \" << v1;\n}\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.\n// Don't use this in your code.\n#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\\\n  GTEST_ASSERT_(pred_format(#v1, v1), \\\n                on_failure)\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED1.  
Don't use\n// this in your code.\n#define GTEST_PRED1_(pred, v1, on_failure)\\\n  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \\\n                                             #v1, \\\n                                             pred, \\\n                                             v1), on_failure)\n\n// Unary predicate assertion macros.\n#define EXPECT_PRED_FORMAT1(pred_format, v1) \\\n  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_PRED1(pred, v1) \\\n  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_PRED_FORMAT1(pred_format, v1) \\\n  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)\n#define ASSERT_PRED1(pred, v1) \\\n  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)\n\n\n\n// Helper function for implementing {EXPECT|ASSERT}_PRED2.  Don't use\n// this in your code.\ntemplate <typename Pred,\n          typename T1,\n          typename T2>\nAssertionResult AssertPred2Helper(const char* pred_text,\n                                  const char* e1,\n                                  const char* e2,\n                                  Pred pred,\n                                  const T1& v1,\n                                  const T2& v2) {\n  if (pred(v1, v2)) return AssertionSuccess();\n\n  return AssertionFailure() << pred_text << \"(\"\n                            << e1 << \", \"\n                            << e2 << \") evaluates to false, where\"\n                            << \"\\n\" << e1 << \" evaluates to \" << v1\n                            << \"\\n\" << e2 << \" evaluates to \" << v2;\n}\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.\n// Don't use this in your code.\n#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\\\n  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \\\n                on_failure)\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED2.  
Don't use\n// this in your code.\n#define GTEST_PRED2_(pred, v1, v2, on_failure)\\\n  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \\\n                                             #v1, \\\n                                             #v2, \\\n                                             pred, \\\n                                             v1, \\\n                                             v2), on_failure)\n\n// Binary predicate assertion macros.\n#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \\\n  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_PRED2(pred, v1, v2) \\\n  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \\\n  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)\n#define ASSERT_PRED2(pred, v1, v2) \\\n  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)\n\n\n\n// Helper function for implementing {EXPECT|ASSERT}_PRED3.  Don't use\n// this in your code.\ntemplate <typename Pred,\n          typename T1,\n          typename T2,\n          typename T3>\nAssertionResult AssertPred3Helper(const char* pred_text,\n                                  const char* e1,\n                                  const char* e2,\n                                  const char* e3,\n                                  Pred pred,\n                                  const T1& v1,\n                                  const T2& v2,\n                                  const T3& v3) {\n  if (pred(v1, v2, v3)) return AssertionSuccess();\n\n  return AssertionFailure() << pred_text << \"(\"\n                            << e1 << \", \"\n                            << e2 << \", \"\n                            << e3 << \") evaluates to false, where\"\n                            << \"\\n\" << e1 << \" evaluates to \" << v1\n                            << \"\\n\" << e2 << \" evaluates to \" << v2\n                            << \"\\n\" << e3 << \" evaluates to \" << v3;\n}\n\n// Internal 
macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.\n// Don't use this in your code.\n#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\\\n  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \\\n                on_failure)\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use\n// this in your code.\n#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\\\n  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \\\n                                             #v1, \\\n                                             #v2, \\\n                                             #v3, \\\n                                             pred, \\\n                                             v1, \\\n                                             v2, \\\n                                             v3), on_failure)\n\n// Ternary predicate assertion macros.\n#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \\\n  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_PRED3(pred, v1, v2, v3) \\\n  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \\\n  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)\n#define ASSERT_PRED3(pred, v1, v2, v3) \\\n  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)\n\n\n\n// Helper function for implementing {EXPECT|ASSERT}_PRED4.  
Don't use\n// this in your code.\ntemplate <typename Pred,\n          typename T1,\n          typename T2,\n          typename T3,\n          typename T4>\nAssertionResult AssertPred4Helper(const char* pred_text,\n                                  const char* e1,\n                                  const char* e2,\n                                  const char* e3,\n                                  const char* e4,\n                                  Pred pred,\n                                  const T1& v1,\n                                  const T2& v2,\n                                  const T3& v3,\n                                  const T4& v4) {\n  if (pred(v1, v2, v3, v4)) return AssertionSuccess();\n\n  return AssertionFailure() << pred_text << \"(\"\n                            << e1 << \", \"\n                            << e2 << \", \"\n                            << e3 << \", \"\n                            << e4 << \") evaluates to false, where\"\n                            << \"\\n\" << e1 << \" evaluates to \" << v1\n                            << \"\\n\" << e2 << \" evaluates to \" << v2\n                            << \"\\n\" << e3 << \" evaluates to \" << v3\n                            << \"\\n\" << e4 << \" evaluates to \" << v4;\n}\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.\n// Don't use this in your code.\n#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\\\n  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \\\n                on_failure)\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  
Don't use\n// this in your code.\n#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\\\n  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \\\n                                             #v1, \\\n                                             #v2, \\\n                                             #v3, \\\n                                             #v4, \\\n                                             pred, \\\n                                             v1, \\\n                                             v2, \\\n                                             v3, \\\n                                             v4), on_failure)\n\n// 4-ary predicate assertion macros.\n#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \\\n  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_PRED4(pred, v1, v2, v3, v4) \\\n  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \\\n  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)\n#define ASSERT_PRED4(pred, v1, v2, v3, v4) \\\n  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)\n\n\n\n// Helper function for implementing {EXPECT|ASSERT}_PRED5.  
Don't use\n// this in your code.\ntemplate <typename Pred,\n          typename T1,\n          typename T2,\n          typename T3,\n          typename T4,\n          typename T5>\nAssertionResult AssertPred5Helper(const char* pred_text,\n                                  const char* e1,\n                                  const char* e2,\n                                  const char* e3,\n                                  const char* e4,\n                                  const char* e5,\n                                  Pred pred,\n                                  const T1& v1,\n                                  const T2& v2,\n                                  const T3& v3,\n                                  const T4& v4,\n                                  const T5& v5) {\n  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();\n\n  return AssertionFailure() << pred_text << \"(\"\n                            << e1 << \", \"\n                            << e2 << \", \"\n                            << e3 << \", \"\n                            << e4 << \", \"\n                            << e5 << \") evaluates to false, where\"\n                            << \"\\n\" << e1 << \" evaluates to \" << v1\n                            << \"\\n\" << e2 << \" evaluates to \" << v2\n                            << \"\\n\" << e3 << \" evaluates to \" << v3\n                            << \"\\n\" << e4 << \" evaluates to \" << v4\n                            << \"\\n\" << e5 << \" evaluates to \" << v5;\n}\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.\n// Don't use this in your code.\n#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\\\n  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \\\n                on_failure)\n\n// Internal macro for implementing {EXPECT|ASSERT}_PRED5.  
Don't use\n// this in your code.\n#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\\\n  GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \\\n                                             #v1, \\\n                                             #v2, \\\n                                             #v3, \\\n                                             #v4, \\\n                                             #v5, \\\n                                             pred, \\\n                                             v1, \\\n                                             v2, \\\n                                             v3, \\\n                                             v4, \\\n                                             v5), on_failure)\n\n// 5-ary predicate assertion macros.\n#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \\\n  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)\n#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \\\n  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)\n#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \\\n  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)\n#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \\\n  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)\n\n\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_\n\n// Macros for testing equalities and inequalities.\n//\n//    * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual\n//    * {ASSERT|EXPECT}_NE(v1, v2):           Tests that v1 != v2\n//    * {ASSERT|EXPECT}_LT(v1, v2):           Tests that v1 < v2\n//    * {ASSERT|EXPECT}_LE(v1, v2):           Tests that v1 <= v2\n//    * {ASSERT|EXPECT}_GT(v1, v2):           Tests that v1 > v2\n//    * {ASSERT|EXPECT}_GE(v1, v2):           Tests that v1 >= v2\n//\n// When they are not, Google Test prints both the tested expressions and\n// their actual values.  
The values must be compatible built-in types,\n// or you will get a compiler error.  By \"compatible\" we mean that the\n// values can be compared by the respective operator.\n//\n// Note:\n//\n//   1. It is possible to make a user-defined type work with\n//   {ASSERT|EXPECT}_??(), but that requires overloading the\n//   comparison operators and is thus discouraged by the Google C++\n//   Usage Guide.  Therefore, you are advised to use the\n//   {ASSERT|EXPECT}_TRUE() macro to assert that two objects are\n//   equal.\n//\n//   2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on\n//   pointers (in particular, C strings).  Therefore, if you use it\n//   with two C strings, you are testing how their locations in memory\n//   are related, not how their content is related.  To compare two C\n//   strings by content, use {ASSERT|EXPECT}_STR*().\n//\n//   3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to\n//   {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you\n//   what the actual value is when it fails, and similarly for the\n//   other comparisons.\n//\n//   4. Do not depend on the order in which {ASSERT|EXPECT}_??()\n//   evaluate their arguments, which is undefined.\n//\n//   5. 
These macros evaluate their arguments exactly once.\n//\n// Examples:\n//\n//   EXPECT_NE(5, Foo());\n//   EXPECT_EQ(NULL, a_pointer);\n//   ASSERT_LT(i, array_size);\n//   ASSERT_GT(records.size(), 0) << \"There is no record left.\";\n\n#define EXPECT_EQ(expected, actual) \\\n  EXPECT_PRED_FORMAT2(::testing::internal:: \\\n                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \\\n                      expected, actual)\n#define EXPECT_NE(expected, actual) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)\n#define EXPECT_LE(val1, val2) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)\n#define EXPECT_LT(val1, val2) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)\n#define EXPECT_GE(val1, val2) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)\n#define EXPECT_GT(val1, val2) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)\n\n#define GTEST_ASSERT_EQ(expected, actual) \\\n  ASSERT_PRED_FORMAT2(::testing::internal:: \\\n                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \\\n                      expected, actual)\n#define GTEST_ASSERT_NE(val1, val2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)\n#define GTEST_ASSERT_LE(val1, val2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)\n#define GTEST_ASSERT_LT(val1, val2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)\n#define GTEST_ASSERT_GE(val1, val2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)\n#define GTEST_ASSERT_GT(val1, val2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)\n\n// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of\n// ASSERT_XY(), which clashes with some users' own code.\n\n#if !GTEST_DONT_DEFINE_ASSERT_EQ\n# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)\n#endif\n\n#if 
!GTEST_DONT_DEFINE_ASSERT_NE\n# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)\n#endif\n\n#if !GTEST_DONT_DEFINE_ASSERT_LE\n# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)\n#endif\n\n#if !GTEST_DONT_DEFINE_ASSERT_LT\n# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)\n#endif\n\n#if !GTEST_DONT_DEFINE_ASSERT_GE\n# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)\n#endif\n\n#if !GTEST_DONT_DEFINE_ASSERT_GT\n# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)\n#endif\n\n// C-string Comparisons.  All tests treat NULL and any non-NULL string\n// as different.  Two NULLs are equal.\n//\n//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2\n//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2\n//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case\n//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case\n//\n// For wide or narrow string objects, you can use the\n// {ASSERT|EXPECT}_??() macros.\n//\n// Don't depend on the order in which the arguments are evaluated,\n// which is undefined.\n//\n// These macros evaluate their arguments exactly once.\n\n#define EXPECT_STREQ(expected, actual) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)\n#define EXPECT_STRNE(s1, s2) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)\n#define EXPECT_STRCASEEQ(expected, actual) \\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)\n#define EXPECT_STRCASENE(s1, s2)\\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)\n\n#define ASSERT_STREQ(expected, actual) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)\n#define ASSERT_STRNE(s1, s2) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)\n#define ASSERT_STRCASEEQ(expected, actual) \\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)\n#define 
ASSERT_STRCASENE(s1, s2)\\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)\n\n// Macros for comparing floating-point numbers.\n//\n//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):\n//         Tests that two float values are almost equal.\n//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):\n//         Tests that two double values are almost equal.\n//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):\n//         Tests that v1 and v2 are within the given distance to each other.\n//\n// Google Test uses ULP-based comparison to automatically pick a default\n// error bound that is appropriate for the operands.  See the\n// FloatingPoint template class in gtest-internal.h if you are\n// interested in the implementation details.\n\n#define EXPECT_FLOAT_EQ(expected, actual)\\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \\\n                      expected, actual)\n\n#define EXPECT_DOUBLE_EQ(expected, actual)\\\n  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \\\n                      expected, actual)\n\n#define ASSERT_FLOAT_EQ(expected, actual)\\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \\\n                      expected, actual)\n\n#define ASSERT_DOUBLE_EQ(expected, actual)\\\n  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \\\n                      expected, actual)\n\n#define EXPECT_NEAR(val1, val2, abs_error)\\\n  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \\\n                      val1, val2, abs_error)\n\n#define ASSERT_NEAR(val1, val2, abs_error)\\\n  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \\\n                      val1, val2, abs_error)\n\n// These predicate format functions work on floating-point values, and\n// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.\n//\n//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);\n\n// Asserts that val1 is less than, or almost 
equal to, val2.  Fails\n// otherwise.  In particular, it fails if either val1 or val2 is NaN.\nGTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,\n                                   float val1, float val2);\nGTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,\n                                    double val1, double val2);\n\n\n#if GTEST_OS_WINDOWS\n\n// Macros that test for HRESULT failure and success, these are only useful\n// on Windows, and rely on Windows SDK macros and APIs to compile.\n//\n//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)\n//\n// When expr unexpectedly fails or succeeds, Google Test prints the\n// expected result and the actual result with both a human-readable\n// string representation of the error, if available, as well as the\n// hex result code.\n# define EXPECT_HRESULT_SUCCEEDED(expr) \\\n    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))\n\n# define ASSERT_HRESULT_SUCCEEDED(expr) \\\n    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))\n\n# define EXPECT_HRESULT_FAILED(expr) \\\n    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))\n\n# define ASSERT_HRESULT_FAILED(expr) \\\n    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))\n\n#endif  // GTEST_OS_WINDOWS\n\n// Macros that execute statement and check that it doesn't generate new fatal\n// failures in the current thread.\n//\n//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);\n//\n// Examples:\n//\n//   EXPECT_NO_FATAL_FAILURE(Process());\n//   ASSERT_NO_FATAL_FAILURE(Process()) << \"Process() failed\";\n//\n#define ASSERT_NO_FATAL_FAILURE(statement) \\\n    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)\n#define EXPECT_NO_FATAL_FAILURE(statement) \\\n    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)\n\n// Causes a trace (including the source file path, the current line\n// number, and the given message) to be included in every 
test failure\n// message generated by code in the current scope.  The effect is\n// undone when the control leaves the current scope.\n//\n// The message argument can be anything streamable to std::ostream.\n//\n// In the implementation, we include the current line number as part\n// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s\n// to appear in the same block - as long as they are on different\n// lines.\n#define SCOPED_TRACE(message) \\\n  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\\\n    __FILE__, __LINE__, ::testing::Message() << (message))\n\n// Compile-time assertion for type equality.\n// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are\n// the same type.  The value it returns is not interesting.\n//\n// Instead of making StaticAssertTypeEq a class template, we make it a\n// function template that invokes a helper class template.  This\n// prevents a user from misusing StaticAssertTypeEq<T1, T2> by\n// defining objects of that type.\n//\n// CAVEAT:\n//\n// When used inside a method of a class template,\n// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is\n// instantiated.  For example, given:\n//\n//   template <typename T> class Foo {\n//    public:\n//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }\n//   };\n//\n// the code:\n//\n//   void Test1() { Foo<bool> foo; }\n//\n// will NOT generate a compiler error, as Foo<bool>::Bar() is never\n// actually instantiated.  Instead, you need:\n//\n//   void Test2() { Foo<bool> foo; foo.Bar(); }\n//\n// to cause a compiler error.\ntemplate <typename T1, typename T2>\nbool StaticAssertTypeEq() {\n  (void)internal::StaticAssertTypeEqHelper<T1, T2>();\n  return true;\n}\n\n// Defines a test.\n//\n// The first parameter is the name of the test case, and the second\n// parameter is the name of the test within the test case.\n//\n// The convention is to end the test case name with \"Test\".  
For\n// example, a test case for the Foo class can be named FooTest.\n//\n// Test code should appear between braces after an invocation of\n// this macro.  Example:\n//\n//   TEST(FooTest, InitializesCorrectly) {\n//     Foo foo;\n//     EXPECT_TRUE(foo.StatusIsOK());\n//   }\n\n// Note that we call GetTestTypeId() instead of GetTypeId<\n// ::testing::Test>() here to get the type ID of testing::Test.  This\n// is to work around a suspected linker bug when using Google Test as\n// a framework on Mac OS X.  The bug causes GetTypeId<\n// ::testing::Test>() to return different values depending on whether\n// the call is from the Google Test framework itself or from user test\n// code.  GetTestTypeId() is guaranteed to always return the same\n// value, as it always calls GetTypeId<>() from the Google Test\n// framework.\n#define GTEST_TEST(test_case_name, test_name)\\\n  GTEST_TEST_(test_case_name, test_name, \\\n              ::testing::Test, ::testing::internal::GetTestTypeId())\n\n// Define this macro to 1 to omit the definition of TEST(), which\n// is a generic name and clashes with some other libraries.\n#if !GTEST_DONT_DEFINE_TEST\n# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)\n#endif\n\n// Defines a test that uses a test fixture.\n//\n// The first parameter is the name of the test fixture class, which\n// also doubles as the test case name.  The second parameter is the\n// name of the test within the test case.\n//\n// A test fixture class must be declared earlier.  The user should put\n// his test code between braces after using this macro.  
Example:\n//\n//   class FooTest : public testing::Test {\n//    protected:\n//     virtual void SetUp() { b_.AddElement(3); }\n//\n//     Foo a_;\n//     Foo b_;\n//   };\n//\n//   TEST_F(FooTest, InitializesCorrectly) {\n//     EXPECT_TRUE(a_.StatusIsOK());\n//   }\n//\n//   TEST_F(FooTest, ReturnsElementCountCorrectly) {\n//     EXPECT_EQ(0, a_.size());\n//     EXPECT_EQ(1, b_.size());\n//   }\n\n#define TEST_F(test_fixture, test_name)\\\n  GTEST_TEST_(test_fixture, test_name, test_fixture, \\\n              ::testing::internal::GetTypeId<test_fixture>())\n\n}  // namespace testing\n\n// Use this function in main() to run all tests.  It returns 0 if all\n// tests are successful, or 1 otherwise.\n//\n// RUN_ALL_TESTS() should be invoked after the command line has been\n// parsed by InitGoogleTest().\n//\n// This function was formerly a macro; thus, it is in the global\n// namespace and has an all-caps name.\nint RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;\n\ninline int RUN_ALL_TESTS() {\n  return ::testing::UnitTest::GetInstance()->Run();\n}\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_H_\n"
  },
  {
    "path": "libs/gtest_mpi/external/gtest/src/gtest-all.cpp",
    "content": "// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: mheule@google.com (Markus Heule)\n//\n// Google C++ Testing Framework (Google Test)\n//\n// Sometimes it's desirable to build Google Test by compiling a single file.\n// This file serves this purpose.\n\n// This line ensures that gtest.h can be compiled on its own, even\n// when it's fused.\n#include \"gtest/gtest.h\"\n\n// The following lines pull in the real gtest *.cc files.\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// The Google C++ Testing Framework (Google Test)\n\n// Copyright 2007, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// Utilities for testing Google Test itself and code that uses Google Test\n// (e.g. frameworks built on top of Google Test).\n\n#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_\n#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_\n\n\nnamespace testing {\n\n// This helper class can be used to mock out Google Test failure reporting\n// so that we can test Google Test or code that builds on Google Test.\n//\n// An object of this class appends a TestPartResult object to the\n// TestPartResultArray object given in the constructor whenever a Google Test\n// failure is reported. It can either intercept only failures that are\n// generated in the same thread that created this object or it can intercept\n// all generated failures. The scope of this mock object can be controlled with\n// the second argument to the two arguments constructor.\nclass GTEST_API_ ScopedFakeTestPartResultReporter\n    : public TestPartResultReporterInterface {\n public:\n  // The two possible mocking modes of this object.\n  enum InterceptMode {\n    INTERCEPT_ONLY_CURRENT_THREAD,  // Intercepts only thread local failures.\n    INTERCEPT_ALL_THREADS           // Intercepts all failures.\n  };\n\n  // The c'tor sets this object as the test part result reporter used\n  // by Google Test.  The 'result' parameter specifies where to report the\n  // results. This reporter will only catch failures generated in the current\n  // thread. 
DEPRECATED\n  explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);\n\n  // Same as above, but you can choose the interception scope of this object.\n  ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,\n                                   TestPartResultArray* result);\n\n  // The d'tor restores the previous test part result reporter.\n  virtual ~ScopedFakeTestPartResultReporter();\n\n  // Appends the TestPartResult object to the TestPartResultArray\n  // received in the constructor.\n  //\n  // This method is from the TestPartResultReporterInterface\n  // interface.\n  virtual void ReportTestPartResult(const TestPartResult& result);\n private:\n  void Init();\n\n  const InterceptMode intercept_mode_;\n  TestPartResultReporterInterface* old_reporter_;\n  TestPartResultArray* const result_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);\n};\n\nnamespace internal {\n\n// A helper class for implementing EXPECT_FATAL_FAILURE() and\n// EXPECT_NONFATAL_FAILURE().  Its destructor verifies that the given\n// TestPartResultArray contains exactly one failure that has the given\n// type and contains the given substring.  If that's not the case, a\n// non-fatal failure will be generated.\nclass GTEST_API_ SingleFailureChecker {\n public:\n  // The constructor remembers the arguments.\n  SingleFailureChecker(const TestPartResultArray* results,\n                       TestPartResult::Type type,\n                       const string& substr);\n  ~SingleFailureChecker();\n private:\n  const TestPartResultArray* const results_;\n  const TestPartResult::Type type_;\n  const string substr_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);\n};\n\n}  // namespace internal\n\n}  // namespace testing\n\n// A set of macros for testing Google Test assertions or code that's expected\n// to generate Google Test fatal failures.  
It verifies that the given\n// statement will cause exactly one fatal Google Test failure with 'substr'\n// being part of the failure message.\n//\n// There are two different versions of this macro. EXPECT_FATAL_FAILURE only\n// affects and considers failures generated in the current thread and\n// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.\n//\n// The verification of the assertion is done correctly even when the statement\n// throws an exception or aborts the current function.\n//\n// Known restrictions:\n//   - 'statement' cannot reference local non-static variables or\n//     non-static members of the current object.\n//   - 'statement' cannot return a value.\n//   - You cannot stream a failure message to this macro.\n//\n// Note that even though the implementations of the following two\n// macros are much alike, we cannot refactor them to use a common\n// helper macro, due to some peculiarity in how the preprocessor\n// works.  The AcceptsMacroThatExpandsToUnprotectedComma test in\n// gtest_unittest.cc will fail to compile if we do that.\n#define EXPECT_FATAL_FAILURE(statement, substr) \\\n  do { \\\n    class GTestExpectFatalFailureHelper {\\\n     public:\\\n      static void Execute() { statement; }\\\n    };\\\n    ::testing::TestPartResultArray gtest_failures;\\\n    ::testing::internal::SingleFailureChecker gtest_checker(\\\n        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\\\n    {\\\n      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\\\n          ::testing::ScopedFakeTestPartResultReporter:: \\\n          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\\\n      GTestExpectFatalFailureHelper::Execute();\\\n    }\\\n  } while (::testing::internal::AlwaysFalse())\n\n#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \\\n  do { \\\n    class GTestExpectFatalFailureHelper {\\\n     public:\\\n      static void Execute() { statement; }\\\n    };\\\n    
::testing::TestPartResultArray gtest_failures;\\\n    ::testing::internal::SingleFailureChecker gtest_checker(\\\n        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\\\n    {\\\n      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\\\n          ::testing::ScopedFakeTestPartResultReporter:: \\\n          INTERCEPT_ALL_THREADS, &gtest_failures);\\\n      GTestExpectFatalFailureHelper::Execute();\\\n    }\\\n  } while (::testing::internal::AlwaysFalse())\n\n// A macro for testing Google Test assertions or code that's expected to\n// generate Google Test non-fatal failures.  It asserts that the given\n// statement will cause exactly one non-fatal Google Test failure with 'substr'\n// being part of the failure message.\n//\n// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only\n// affects and considers failures generated in the current thread and\n// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.\n//\n// 'statement' is allowed to reference local variables and members of\n// the current object.\n//\n// The verification of the assertion is done correctly even when the statement\n// throws an exception or aborts the current function.\n//\n// Known restrictions:\n//   - You cannot stream a failure message to this macro.\n//\n// Note that even though the implementations of the following two\n// macros are much alike, we cannot refactor them to use a common\n// helper macro, due to some peculiarity in how the preprocessor\n// works.  If we do that, the code won't compile when the user gives\n// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that\n// expands to code containing an unprotected comma.  
The\n// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc\n// catches that.\n//\n// For the same reason, we have to write\n//   if (::testing::internal::AlwaysTrue()) { statement; }\n// instead of\n//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)\n// to avoid an MSVC warning on unreachable code.\n#define EXPECT_NONFATAL_FAILURE(statement, substr) \\\n  do {\\\n    ::testing::TestPartResultArray gtest_failures;\\\n    ::testing::internal::SingleFailureChecker gtest_checker(\\\n        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \\\n        (substr));\\\n    {\\\n      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\\\n          ::testing::ScopedFakeTestPartResultReporter:: \\\n          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\\\n      if (::testing::internal::AlwaysTrue()) { statement; }\\\n    }\\\n  } while (::testing::internal::AlwaysFalse())\n\n#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \\\n  do {\\\n    ::testing::TestPartResultArray gtest_failures;\\\n    ::testing::internal::SingleFailureChecker gtest_checker(\\\n        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \\\n        (substr));\\\n    {\\\n      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\\\n          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \\\n          &gtest_failures);\\\n      if (::testing::internal::AlwaysTrue()) { statement; }\\\n    }\\\n  } while (::testing::internal::AlwaysFalse())\n\n#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_\n\n#include <ctype.h>\n#include <math.h>\n#include <stdarg.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n#include <wchar.h>\n#include <wctype.h>\n\n#include <algorithm>\n#include <iomanip>\n#include <limits>\n#include <list>\n#include <map>\n#include <ostream>  // NOLINT\n#include <sstream>\n#include <vector>\n\n#if GTEST_OS_LINUX\n\n// TODO(kenton@google.com): Use autoconf to detect availability 
of\n// gettimeofday().\n# define GTEST_HAS_GETTIMEOFDAY_ 1\n\n# include <fcntl.h>  // NOLINT\n# include <limits.h>  // NOLINT\n# include <sched.h>  // NOLINT\n// Declares vsnprintf().  This header is not available on Windows.\n# include <strings.h>  // NOLINT\n# include <sys/mman.h>  // NOLINT\n# include <sys/time.h>  // NOLINT\n# include <unistd.h>  // NOLINT\n# include <string>\n\n#elif GTEST_OS_SYMBIAN\n# define GTEST_HAS_GETTIMEOFDAY_ 1\n# include <sys/time.h>  // NOLINT\n\n#elif GTEST_OS_ZOS\n# define GTEST_HAS_GETTIMEOFDAY_ 1\n# include <sys/time.h>  // NOLINT\n\n// On z/OS we additionally need strings.h for strcasecmp.\n# include <strings.h>  // NOLINT\n\n#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.\n\n# include <windows.h>  // NOLINT\n# undef min\n\n#elif GTEST_OS_WINDOWS  // We are on Windows proper.\n\n# include <io.h>  // NOLINT\n# include <sys/timeb.h>  // NOLINT\n# include <sys/types.h>  // NOLINT\n# include <sys/stat.h>  // NOLINT\n\n# if GTEST_OS_WINDOWS_MINGW\n// MinGW has gettimeofday() but not _ftime64().\n// TODO(kenton@google.com): Use autoconf to detect availability of\n//   gettimeofday().\n// TODO(kenton@google.com): There are other ways to get the time on\n//   Windows, like GetTickCount() or GetSystemTimeAsFileTime().  MinGW\n//   supports these.  
consider using them instead.\n#  define GTEST_HAS_GETTIMEOFDAY_ 1\n#  include <sys/time.h>  // NOLINT\n# endif  // GTEST_OS_WINDOWS_MINGW\n\n// cpplint thinks that the header is already included, so we want to\n// silence it.\n# include <windows.h>  // NOLINT\n# undef min\n\n#else\n\n// Assume other platforms have gettimeofday().\n// TODO(kenton@google.com): Use autoconf to detect availability of\n//   gettimeofday().\n# define GTEST_HAS_GETTIMEOFDAY_ 1\n\n// cpplint thinks that the header is already included, so we want to\n// silence it.\n# include <sys/time.h>  // NOLINT\n# include <unistd.h>  // NOLINT\n\n#endif  // GTEST_OS_LINUX\n\n#if GTEST_HAS_EXCEPTIONS\n# include <stdexcept>\n#endif\n\n#if GTEST_CAN_STREAM_RESULTS_\n# include <arpa/inet.h>  // NOLINT\n# include <netdb.h>  // NOLINT\n# include <sys/socket.h>  // NOLINT\n# include <sys/types.h>  // NOLINT\n#endif\n\n// Indicates that this translation unit is part of Google Test's\n// implementation.  It must come before gtest-internal-inl.h is\n// included, or there will be a compiler error.  This trick is to\n// prevent a user from accidentally including gtest-internal-inl.h in\n// his code.\n#define GTEST_IMPLEMENTATION_ 1\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Utility functions and classes used by the Google C++ testing framework.\n//\n// Author: wan@google.com (Zhanyong Wan)\n//\n// This file contains purely Google Test's internal implementation.  
Please\n// DO NOT #INCLUDE IT IN A USER PROGRAM.\n\n#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_\n#define GTEST_SRC_GTEST_INTERNAL_INL_H_\n\n// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is\n// part of Google Test's implementation; otherwise it's undefined.\n#if !GTEST_IMPLEMENTATION_\n// If this file is included from the user's code, just say no.\n# error \"gtest-internal-inl.h is part of Google Test's internal implementation.\"\n# error \"It must not be included except by Google Test itself.\"\n#endif  // GTEST_IMPLEMENTATION_\n\n#ifndef _WIN32_WCE\n# include <errno.h>\n#endif  // !_WIN32_WCE\n#include <stddef.h>\n#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.\n#include <string.h>  // For memmove.\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n\n#if GTEST_CAN_STREAM_RESULTS_\n# include <arpa/inet.h>  // NOLINT\n# include <netdb.h>  // NOLINT\n#endif\n\n#if GTEST_OS_WINDOWS\n# include <windows.h>  // NOLINT\n#endif  // GTEST_OS_WINDOWS\n\n\nnamespace testing {\n\n// Declares the flags.\n//\n// We don't want the users to modify this flag in the code, but want\n// Google Test's own unit tests to be able to access it. Therefore we\n// declare it here as opposed to in gtest.h.\nGTEST_DECLARE_bool_(death_test_use_fork);\n\nnamespace internal {\n\n// The value of GetTestTypeId() as seen from within the Google Test\n// library.  
This is solely for testing GetTestTypeId().\nGTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;\n\n// Names of the flags (needed for parsing Google Test flags).\nconst char kAlsoRunDisabledTestsFlag[] = \"also_run_disabled_tests\";\nconst char kBreakOnFailureFlag[] = \"break_on_failure\";\nconst char kCatchExceptionsFlag[] = \"catch_exceptions\";\nconst char kColorFlag[] = \"color\";\nconst char kFilterFlag[] = \"filter\";\nconst char kListTestsFlag[] = \"list_tests\";\nconst char kOutputFlag[] = \"output\";\nconst char kPrintTimeFlag[] = \"print_time\";\nconst char kRandomSeedFlag[] = \"random_seed\";\nconst char kRepeatFlag[] = \"repeat\";\nconst char kShuffleFlag[] = \"shuffle\";\nconst char kStackTraceDepthFlag[] = \"stack_trace_depth\";\nconst char kStreamResultToFlag[] = \"stream_result_to\";\nconst char kThrowOnFailureFlag[] = \"throw_on_failure\";\nconst char kFlagfileFlag[] = \"flagfile\";\n\n// A valid random seed must be in [1, kMaxRandomSeed].\nconst int kMaxRandomSeed = 99999;\n\n// g_help_flag is true iff the --help flag or an equivalent form is\n// specified on the command line.\nGTEST_API_ extern bool g_help_flag;\n\n// Returns the current time in milliseconds.\nGTEST_API_ TimeInMillis GetTimeInMillis();\n\n// Returns true iff Google Test should use colors in the output.\nGTEST_API_ bool ShouldUseColor(bool stdout_is_tty);\n\n// Formats the given time in milliseconds as seconds.\nGTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);\n\n// Converts the given time in milliseconds to a date string in the ISO 8601\n// format, without the timezone information.  N.B.: due to the use the\n// non-reentrant localtime() function, this function is not thread safe.  
Do\n// not use it in any code that can be called from multiple threads.\nGTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);\n\n// Parses a string for an Int32 flag, in the form of \"--flag=value\".\n//\n// On success, stores the value of the flag in *value, and returns\n// true.  On failure, returns false without changing *value.\nGTEST_API_ bool ParseInt32Flag(\n    const char* str, const char* flag, Int32* value);\n\n// Returns a random seed in range [1, kMaxRandomSeed] based on the\n// given --gtest_random_seed flag value.\ninline int GetRandomSeedFromFlag(Int32 random_seed_flag) {\n  const unsigned int raw_seed = (random_seed_flag == 0) ?\n      static_cast<unsigned int>(GetTimeInMillis()) :\n      static_cast<unsigned int>(random_seed_flag);\n\n  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that\n  // it's easy to type.\n  const int normalized_seed =\n      static_cast<int>((raw_seed - 1U) %\n                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;\n  return normalized_seed;\n}\n\n// Returns the first valid random seed after 'seed'.  The behavior is\n// undefined if 'seed' is invalid.  The seed after kMaxRandomSeed is\n// considered to be 1.\ninline int GetNextRandomSeed(int seed) {\n  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)\n      << \"Invalid random seed \" << seed << \" - must be in [1, \"\n      << kMaxRandomSeed << \"].\";\n  const int next_seed = seed + 1;\n  return (next_seed > kMaxRandomSeed) ? 
1 : next_seed;\n}\n\n// This class saves the values of all Google Test flags in its c'tor, and\n// restores them in its d'tor.\nclass GTestFlagSaver {\n public:\n  // The c'tor.\n  GTestFlagSaver() {\n    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);\n    break_on_failure_ = GTEST_FLAG(break_on_failure);\n    catch_exceptions_ = GTEST_FLAG(catch_exceptions);\n    color_ = GTEST_FLAG(color);\n    death_test_style_ = GTEST_FLAG(death_test_style);\n    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);\n    filter_ = GTEST_FLAG(filter);\n    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);\n    list_tests_ = GTEST_FLAG(list_tests);\n    output_ = GTEST_FLAG(output);\n    print_time_ = GTEST_FLAG(print_time);\n    random_seed_ = GTEST_FLAG(random_seed);\n    repeat_ = GTEST_FLAG(repeat);\n    shuffle_ = GTEST_FLAG(shuffle);\n    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);\n    stream_result_to_ = GTEST_FLAG(stream_result_to);\n    throw_on_failure_ = GTEST_FLAG(throw_on_failure);\n  }\n\n  // The d'tor is not virtual.  
DO NOT INHERIT FROM THIS CLASS.\n  ~GTestFlagSaver() {\n    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;\n    GTEST_FLAG(break_on_failure) = break_on_failure_;\n    GTEST_FLAG(catch_exceptions) = catch_exceptions_;\n    GTEST_FLAG(color) = color_;\n    GTEST_FLAG(death_test_style) = death_test_style_;\n    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;\n    GTEST_FLAG(filter) = filter_;\n    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;\n    GTEST_FLAG(list_tests) = list_tests_;\n    GTEST_FLAG(output) = output_;\n    GTEST_FLAG(print_time) = print_time_;\n    GTEST_FLAG(random_seed) = random_seed_;\n    GTEST_FLAG(repeat) = repeat_;\n    GTEST_FLAG(shuffle) = shuffle_;\n    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;\n    GTEST_FLAG(stream_result_to) = stream_result_to_;\n    GTEST_FLAG(throw_on_failure) = throw_on_failure_;\n  }\n\n private:\n  // Fields for saving the original values of flags.\n  bool also_run_disabled_tests_;\n  bool break_on_failure_;\n  bool catch_exceptions_;\n  std::string color_;\n  std::string death_test_style_;\n  bool death_test_use_fork_;\n  std::string filter_;\n  std::string internal_run_death_test_;\n  bool list_tests_;\n  std::string output_;\n  bool print_time_;\n  internal::Int32 random_seed_;\n  internal::Int32 repeat_;\n  bool shuffle_;\n  internal::Int32 stack_trace_depth_;\n  std::string stream_result_to_;\n  bool throw_on_failure_;\n} GTEST_ATTRIBUTE_UNUSED_;\n\n// Converts a Unicode code point to a narrow string in UTF-8 encoding.\n// code_point parameter is of type UInt32 because wchar_t may not be\n// wide enough to contain a code point.\n// If the code_point is not a valid Unicode code point\n// (i.e. 
outside of Unicode range U+0 to U+10FFFF) it will be converted\n// to \"(Invalid Unicode 0xXXXXXXXX)\".\nGTEST_API_ std::string CodePointToUtf8(UInt32 code_point);\n\n// Converts a wide string to a narrow string in UTF-8 encoding.\n// The wide string is assumed to have the following encoding:\n//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)\n//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)\n// Parameter str points to a null-terminated wide string.\n// Parameter num_chars may additionally limit the number\n// of wchar_t characters processed. -1 is used when the entire string\n// should be processed.\n// If the string contains code points that are not valid Unicode code points\n// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output\n// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding\n// and contains invalid UTF-16 surrogate pairs, values in those pairs\n// will be encoded as individual Unicode characters from Basic Normal Plane.\nGTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);\n\n// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file\n// if the variable is present. If a file already exists at this location, this\n// function will write over it. If the variable is present, but the file cannot\n// be created, prints an error and exits.\nvoid WriteToShardStatusFileIfNeeded();\n\n// Checks whether sharding is enabled by examining the relevant\n// environment variable values. If the variables are present,\n// but inconsistent (e.g., shard_index >= total_shards), prints\n// an error and exits. If in_subprocess_for_death_test, sharding is\n// disabled because it must only be applied to the original test\n// process. 
Otherwise, we could filter out death tests we intended to execute.\nGTEST_API_ bool ShouldShard(const char* total_shards_str,\n                            const char* shard_index_str,\n                            bool in_subprocess_for_death_test);\n\n// Parses the environment variable var as an Int32. If it is unset,\n// returns default_val. If it is not an Int32, prints an error and\n// and aborts.\nGTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);\n\n// Given the total number of shards, the shard index, and the test id,\n// returns true iff the test should be run on this shard. The test id is\n// some arbitrary but unique non-negative integer assigned to each test\n// method. Assumes that 0 <= shard_index < total_shards.\nGTEST_API_ bool ShouldRunTestOnShard(\n    int total_shards, int shard_index, int test_id);\n\n// STL container utilities.\n\n// Returns the number of elements in the given container that satisfy\n// the given predicate.\ntemplate <class Container, typename Predicate>\ninline int CountIf(const Container& c, Predicate predicate) {\n  // Implemented as an explicit loop since std::count_if() in libCstd on\n  // Solaris has a non-standard signature.\n  int count = 0;\n  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {\n    if (predicate(*it))\n      ++count;\n  }\n  return count;\n}\n\n// Applies a function/functor to each element in the container.\ntemplate <class Container, typename Functor>\nvoid ForEach(const Container& c, Functor functor) {\n  std::for_each(c.begin(), c.end(), functor);\n}\n\n// Returns the i-th element of the vector, or default_value if i is not\n// in range [0, v.size()).\ntemplate <typename E>\ninline E GetElementOr(const std::vector<E>& v, int i, E default_value) {\n  return (i < 0 || i >= static_cast<int>(v.size())) ? 
default_value : v[i];\n}\n\n// Performs an in-place shuffle of a range of the vector's elements.\n// 'begin' and 'end' are element indices as an STL-style range;\n// i.e. [begin, end) are shuffled, where 'end' == size() means to\n// shuffle to the end of the vector.\ntemplate <typename E>\nvoid ShuffleRange(internal::Random* random, int begin, int end,\n                  std::vector<E>* v) {\n  const int size = static_cast<int>(v->size());\n  GTEST_CHECK_(0 <= begin && begin <= size)\n      << \"Invalid shuffle range start \" << begin << \": must be in range [0, \"\n      << size << \"].\";\n  GTEST_CHECK_(begin <= end && end <= size)\n      << \"Invalid shuffle range finish \" << end << \": must be in range [\"\n      << begin << \", \" << size << \"].\";\n\n  // Fisher-Yates shuffle, from\n  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle\n  for (int range_width = end - begin; range_width >= 2; range_width--) {\n    const int last_in_range = begin + range_width - 1;\n    const int selected = begin + random->Generate(range_width);\n    std::swap((*v)[selected], (*v)[last_in_range]);\n  }\n}\n\n// Performs an in-place shuffle of the vector's elements.\ntemplate <typename E>\ninline void Shuffle(internal::Random* random, std::vector<E>* v) {\n  ShuffleRange(random, 0, static_cast<int>(v->size()), v);\n}\n\n// A function for deleting an object.  
Handy for being used as a\n// functor.\ntemplate <typename T>\nstatic void Delete(T* x) {\n  delete x;\n}\n\n// A predicate that checks the key of a TestProperty against a known key.\n//\n// TestPropertyKeyIs is copyable.\nclass TestPropertyKeyIs {\n public:\n  // Constructor.\n  //\n  // TestPropertyKeyIs has NO default constructor.\n  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}\n\n  // Returns true iff the test name of test property matches on key_.\n  bool operator()(const TestProperty& test_property) const {\n    return test_property.key() == key_;\n  }\n\n private:\n  std::string key_;\n};\n\n// Class UnitTestOptions.\n//\n// This class contains functions for processing options the user\n// specifies when running the tests.  It has only static members.\n//\n// In most cases, the user can specify an option using either an\n// environment variable or a command line flag.  E.g. you can set the\n// test filter using either GTEST_FILTER or --gtest_filter.  If both\n// the variable and the flag are present, the latter overrides the\n// former.\nclass GTEST_API_ UnitTestOptions {\n public:\n  // Functions for processing the gtest_output flag.\n\n  // Returns the output format, or \"\" for normal printed output.\n  static std::string GetOutputFormat();\n\n  // Returns the absolute path of the requested output file, or the\n  // default (test_detail.xml in the original working directory) if\n  // none was explicitly specified.\n  static std::string GetAbsolutePathToOutputFile();\n\n  // Functions for processing the gtest_filter flag.\n\n  // Returns true iff the wildcard pattern matches the string.  
The\n  // first ':' or '\\0' character in pattern marks the end of it.\n  //\n  // This recursive algorithm isn't very efficient, but is clear and\n  // works well enough for matching test names, which are short.\n  static bool PatternMatchesString(const char *pattern, const char *str);\n\n  // Returns true iff the user-specified filter matches the test case\n  // name and the test name.\n  static bool FilterMatchesTest(const std::string &test_case_name,\n                                const std::string &test_name);\n\n#if GTEST_OS_WINDOWS\n  // Function for supporting the gtest_catch_exception flag.\n\n  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the\n  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.\n  // This function is useful as an __except condition.\n  static int GTestShouldProcessSEH(DWORD exception_code);\n#endif  // GTEST_OS_WINDOWS\n\n  // Returns true if \"name\" matches the ':' separated list of glob-style\n  // filters in \"filter\".\n  static bool MatchesFilter(const std::string& name, const char* filter);\n};\n\n// Returns the current application's name, removing directory path if that\n// is present.  Used by UnitTestOptions::GetOutputFile.\nGTEST_API_ FilePath GetCurrentExecutableName();\n\n// The role interface for getting the OS stack trace as a string.\nclass OsStackTraceGetterInterface {\n public:\n  OsStackTraceGetterInterface() {}\n  virtual ~OsStackTraceGetterInterface() {}\n\n  // Returns the current OS stack trace as an std::string.  Parameters:\n  //\n  //   max_depth  - the maximum number of stack frames to be included\n  //                in the trace.\n  //   skip_count - the number of top frames to be skipped; doesn't count\n  //                against max_depth.\n  virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;\n\n  // UponLeavingGTest() should be called immediately before Google Test calls\n  // user code. 
It saves some information about the current stack that\n  // CurrentStackTrace() will use to find and hide Google Test stack frames.\n  virtual void UponLeavingGTest() = 0;\n\n  // This string is inserted in place of stack frames that are part of\n  // Google Test's implementation.\n  static const char* const kElidedFramesMarker;\n\n private:\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);\n};\n\n// A working implementation of the OsStackTraceGetterInterface interface.\nclass OsStackTraceGetter : public OsStackTraceGetterInterface {\n public:\n  OsStackTraceGetter() {}\n\n  virtual string CurrentStackTrace(int max_depth, int skip_count);\n  virtual void UponLeavingGTest();\n\n private:\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);\n};\n\n// Information about a Google Test trace point.\nstruct TraceInfo {\n  const char* file;\n  int line;\n  std::string message;\n};\n\n// This is the default global test part result reporter used in UnitTestImpl.\n// This class should only be used by UnitTestImpl.\nclass DefaultGlobalTestPartResultReporter\n  : public TestPartResultReporterInterface {\n public:\n  explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);\n  // Implements the TestPartResultReporterInterface. Reports the test part\n  // result in the current test.\n  virtual void ReportTestPartResult(const TestPartResult& result);\n\n private:\n  UnitTestImpl* const unit_test_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);\n};\n\n// This is the default per thread test part result reporter used in\n// UnitTestImpl. This class should only be used by UnitTestImpl.\nclass DefaultPerThreadTestPartResultReporter\n    : public TestPartResultReporterInterface {\n public:\n  explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);\n  // Implements the TestPartResultReporterInterface. 
The implementation just\n  // delegates to the current global test part result reporter of *unit_test_.\n  virtual void ReportTestPartResult(const TestPartResult& result);\n\n private:\n  UnitTestImpl* const unit_test_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);\n};\n\n// The private implementation of the UnitTest class.  We don't protect\n// the methods under a mutex, as this class is not accessible by a\n// user and the UnitTest class that delegates work to this class does\n// proper locking.\nclass GTEST_API_ UnitTestImpl {\n public:\n  explicit UnitTestImpl(UnitTest* parent);\n  virtual ~UnitTestImpl();\n\n  // There are two different ways to register your own TestPartResultReporter.\n  // You can register your own repoter to listen either only for test results\n  // from the current thread or for results from all threads.\n  // By default, each per-thread test result repoter just passes a new\n  // TestPartResult to the global test result reporter, which registers the\n  // test part result for the currently running test.\n\n  // Returns the global test part result reporter.\n  TestPartResultReporterInterface* GetGlobalTestPartResultReporter();\n\n  // Sets the global test part result reporter.\n  void SetGlobalTestPartResultReporter(\n      TestPartResultReporterInterface* reporter);\n\n  // Returns the test part result reporter for the current thread.\n  TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();\n\n  // Sets the test part result reporter for the current thread.\n  void SetTestPartResultReporterForCurrentThread(\n      TestPartResultReporterInterface* reporter);\n\n  // Gets the number of successful test cases.\n  int successful_test_case_count() const;\n\n  // Gets the number of failed test cases.\n  int failed_test_case_count() const;\n\n  // Gets the number of all test cases.\n  int total_test_case_count() const;\n\n  // Gets the number of all test cases that contain at least one test\n 
 // that should run.\n  int test_case_to_run_count() const;\n\n  // Gets the number of successful tests.\n  int successful_test_count() const;\n\n  // Gets the number of failed tests.\n  int failed_test_count() const;\n\n  // Gets the number of disabled tests that will be reported in the XML report.\n  int reportable_disabled_test_count() const;\n\n  // Gets the number of disabled tests.\n  int disabled_test_count() const;\n\n  // Gets the number of tests to be printed in the XML report.\n  int reportable_test_count() const;\n\n  // Gets the number of all tests.\n  int total_test_count() const;\n\n  // Gets the number of tests that should run.\n  int test_to_run_count() const;\n\n  // Gets the time of the test program start, in ms from the start of the\n  // UNIX epoch.\n  TimeInMillis start_timestamp() const { return start_timestamp_; }\n\n  // Gets the elapsed time, in milliseconds.\n  TimeInMillis elapsed_time() const { return elapsed_time_; }\n\n  // Returns true iff the unit test passed (i.e. all test cases passed).\n  bool Passed() const { return !Failed(); }\n\n  // Returns true iff the unit test failed (i.e. some test case failed\n  // or something outside of all tests failed).\n  bool Failed() const {\n    return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();\n  }\n\n  // Gets the i-th test case among all the test cases. i can range from 0 to\n  // total_test_case_count() - 1. If i is not in that range, returns NULL.\n  const TestCase* GetTestCase(int i) const {\n    const int index = GetElementOr(test_case_indices_, i, -1);\n    return index < 0 ? NULL : test_cases_[i];\n  }\n\n  // Gets the i-th test case among all the test cases. i can range from 0 to\n  // total_test_case_count() - 1. If i is not in that range, returns NULL.\n  TestCase* GetMutableTestCase(int i) {\n    const int index = GetElementOr(test_case_indices_, i, -1);\n    return index < 0 ? 
NULL : test_cases_[index];\n  }\n\n  // Provides access to the event listener list.\n  TestEventListeners* listeners() { return &listeners_; }\n\n  // Returns the TestResult for the test that's currently running, or\n  // the TestResult for the ad hoc test if no test is running.\n  TestResult* current_test_result();\n\n  // Returns the TestResult for the ad hoc test.\n  const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }\n\n  // Sets the OS stack trace getter.\n  //\n  // Does nothing if the input and the current OS stack trace getter\n  // are the same; otherwise, deletes the old getter and makes the\n  // input the current getter.\n  void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);\n\n  // Returns the current OS stack trace getter if it is not NULL;\n  // otherwise, creates an OsStackTraceGetter, makes it the current\n  // getter, and returns it.\n  OsStackTraceGetterInterface* os_stack_trace_getter();\n\n  // Returns the current OS stack trace as an std::string.\n  //\n  // The maximum number of stack frames to be included is specified by\n  // the gtest_stack_trace_depth flag.  The skip_count parameter\n  // specifies the number of top frames to be skipped, which doesn't\n  // count against the number of frames to be included.\n  //\n  // For example, if Foo() calls Bar(), which in turn calls\n  // CurrentOsStackTraceExceptTop(1), Foo() will be included in the\n  // trace but Bar() and CurrentOsStackTraceExceptTop() won't.\n  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;\n\n  // Finds and returns a TestCase with the given name.  
If one doesn't\n  // exist, creates one and returns it.\n  //\n  // Arguments:\n  //\n  //   test_case_name: name of the test case\n  //   type_param:     the name of the test's type parameter, or NULL if\n  //                   this is not a typed or a type-parameterized test.\n  //   set_up_tc:      pointer to the function that sets up the test case\n  //   tear_down_tc:   pointer to the function that tears down the test case\n  TestCase* GetTestCase(const char* test_case_name,\n                        const char* type_param,\n                        Test::SetUpTestCaseFunc set_up_tc,\n                        Test::TearDownTestCaseFunc tear_down_tc);\n\n  // Adds a TestInfo to the unit test.\n  //\n  // Arguments:\n  //\n  //   set_up_tc:    pointer to the function that sets up the test case\n  //   tear_down_tc: pointer to the function that tears down the test case\n  //   test_info:    the TestInfo object\n  void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,\n                   Test::TearDownTestCaseFunc tear_down_tc,\n                   TestInfo* test_info) {\n    // In order to support thread-safe death tests, we need to\n    // remember the original working directory when the test program\n    // was first invoked.  We cannot do this in RUN_ALL_TESTS(), as\n    // the user may have changed the current directory before calling\n    // RUN_ALL_TESTS().  
Therefore we capture the current directory in\n    // AddTestInfo(), which is called to register a TEST or TEST_F\n    // before main() is reached.\n    if (original_working_dir_.IsEmpty()) {\n      original_working_dir_.Set(FilePath::GetCurrentDir());\n      GTEST_CHECK_(!original_working_dir_.IsEmpty())\n          << \"Failed to get the current working directory.\";\n    }\n\n    GetTestCase(test_info->test_case_name(),\n                test_info->type_param(),\n                set_up_tc,\n                tear_down_tc)->AddTestInfo(test_info);\n  }\n\n#if GTEST_HAS_PARAM_TEST\n  // Returns ParameterizedTestCaseRegistry object used to keep track of\n  // value-parameterized tests and instantiate and register them.\n  internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {\n    return parameterized_test_registry_;\n  }\n#endif  // GTEST_HAS_PARAM_TEST\n\n  // Sets the TestCase object for the test that's currently running.\n  void set_current_test_case(TestCase* a_current_test_case) {\n    current_test_case_ = a_current_test_case;\n  }\n\n  // Sets the TestInfo object for the test that's currently running.  If\n  // current_test_info is NULL, the assertion results will be stored in\n  // ad_hoc_test_result_.\n  void set_current_test_info(TestInfo* a_current_test_info) {\n    current_test_info_ = a_current_test_info;\n  }\n\n  // Registers all parameterized tests defined using TEST_P and\n  // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter\n  // combination. This method can be called more then once; it has guards\n  // protecting from registering the tests more then once.  If\n  // value-parameterized tests are disabled, RegisterParameterizedTests is\n  // present but does nothing.\n  void RegisterParameterizedTests();\n\n  // Runs all tests in this UnitTest object, prints the result, and\n  // returns true if all tests are successful.  
If any exception is\n  // thrown during a test, this test is considered to be failed, but\n  // the rest of the tests will still be run.\n  bool RunAllTests();\n\n  // Clears the results of all tests, except the ad hoc tests.\n  void ClearNonAdHocTestResult() {\n    ForEach(test_cases_, TestCase::ClearTestCaseResult);\n  }\n\n  // Clears the results of ad-hoc test assertions.\n  void ClearAdHocTestResult() {\n    ad_hoc_test_result_.Clear();\n  }\n\n  // Adds a TestProperty to the current TestResult object when invoked in a\n  // context of a test or a test case, or to the global property set. If the\n  // result already contains a property with the same key, the value will be\n  // updated.\n  void RecordProperty(const TestProperty& test_property);\n\n  enum ReactionToSharding {\n    HONOR_SHARDING_PROTOCOL,\n    IGNORE_SHARDING_PROTOCOL\n  };\n\n  // Matches the full name of each test against the user-specified\n  // filter to decide whether the test should run, then records the\n  // result in each TestCase and TestInfo object.\n  // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests\n  // based on sharding variables in the environment.\n  // Returns the number of tests that should run.\n  int FilterTests(ReactionToSharding shard_tests);\n\n  // Prints the names of the tests matching the user-specified filter flag.\n  void ListTestsMatchingFilter();\n\n  const TestCase* current_test_case() const { return current_test_case_; }\n  TestInfo* current_test_info() { return current_test_info_; }\n  const TestInfo* current_test_info() const { return current_test_info_; }\n\n  // Returns the vector of environments that need to be set-up/torn-down\n  // before/after the tests are run.\n  std::vector<Environment*>& environments() { return environments_; }\n\n  // Getters for the per-thread Google Test trace stack.\n  std::vector<TraceInfo>& gtest_trace_stack() {\n    return *(gtest_trace_stack_.pointer());\n  }\n  const std::vector<TraceInfo>& 
gtest_trace_stack() const {\n    return gtest_trace_stack_.get();\n  }\n\n#if GTEST_HAS_DEATH_TEST\n  void InitDeathTestSubprocessControlInfo() {\n    internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());\n  }\n  // Returns a pointer to the parsed --gtest_internal_run_death_test\n  // flag, or NULL if that flag was not specified.\n  // This information is useful only in a death test child process.\n  // Must not be called before a call to InitGoogleTest.\n  const InternalRunDeathTestFlag* internal_run_death_test_flag() const {\n    return internal_run_death_test_flag_.get();\n  }\n\n  // Returns a pointer to the current death test factory.\n  internal::DeathTestFactory* death_test_factory() {\n    return death_test_factory_.get();\n  }\n\n  void SuppressTestEventsIfInSubprocess();\n\n  friend class ReplaceDeathTestFactory;\n#endif  // GTEST_HAS_DEATH_TEST\n\n  // Initializes the event listener performing XML output as specified by\n  // UnitTestOptions. Must not be called before InitGoogleTest.\n  void ConfigureXmlOutput();\n\n#if GTEST_CAN_STREAM_RESULTS_\n  // Initializes the event listener for streaming test results to a socket.\n  // Must not be called before InitGoogleTest.\n  void ConfigureStreamingOutput();\n#endif\n\n  // Performs initialization dependent upon flag values obtained in\n  // ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to\n  // ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest\n  // this function is also called from RunAllTests.  
Since this function can be\n  // called more than once, it has to be idempotent.\n  void PostFlagParsingInit();\n\n  // Gets the random seed used at the start of the current test iteration.\n  int random_seed() const { return random_seed_; }\n\n  // Gets the random number generator.\n  internal::Random* random() { return &random_; }\n\n  // Shuffles all test cases, and the tests within each test case,\n  // making sure that death tests are still run first.\n  void ShuffleTests();\n\n  // Restores the test cases and tests to their order before the first shuffle.\n  void UnshuffleTests();\n\n  // Returns the value of GTEST_FLAG(catch_exceptions) at the moment\n  // UnitTest::Run() starts.\n  bool catch_exceptions() const { return catch_exceptions_; }\n\n private:\n  friend class ::testing::UnitTest;\n\n  // Used by UnitTest::Run() to capture the state of\n  // GTEST_FLAG(catch_exceptions) at the moment it starts.\n  void set_catch_exceptions(bool value) { catch_exceptions_ = value; }\n\n  // The UnitTest object that owns this implementation object.\n  UnitTest* const parent_;\n\n  // The working directory when the first TEST() or TEST_F() was\n  // executed.\n  internal::FilePath original_working_dir_;\n\n  // The default test part result reporters.\n  DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;\n  DefaultPerThreadTestPartResultReporter\n      default_per_thread_test_part_result_reporter_;\n\n  // Points to (but doesn't own) the global test part result reporter.\n  TestPartResultReporterInterface* global_test_part_result_repoter_;\n\n  // Protects read and write access to global_test_part_result_reporter_.\n  internal::Mutex global_test_part_result_reporter_mutex_;\n\n  // Points to (but doesn't own) the per-thread test part result reporter.\n  internal::ThreadLocal<TestPartResultReporterInterface*>\n      per_thread_test_part_result_reporter_;\n\n  // The vector of environments that need to be set-up/torn-down\n  // before/after 
the tests are run.\n  std::vector<Environment*> environments_;\n\n  // The vector of TestCases in their original order.  It owns the\n  // elements in the vector.\n  std::vector<TestCase*> test_cases_;\n\n  // Provides a level of indirection for the test case list to allow\n  // easy shuffling and restoring the test case order.  The i-th\n  // element of this vector is the index of the i-th test case in the\n  // shuffled order.\n  std::vector<int> test_case_indices_;\n\n#if GTEST_HAS_PARAM_TEST\n  // ParameterizedTestRegistry object used to register value-parameterized\n  // tests.\n  internal::ParameterizedTestCaseRegistry parameterized_test_registry_;\n\n  // Indicates whether RegisterParameterizedTests() has been called already.\n  bool parameterized_tests_registered_;\n#endif  // GTEST_HAS_PARAM_TEST\n\n  // Index of the last death test case registered.  Initially -1.\n  int last_death_test_case_;\n\n  // This points to the TestCase for the currently running test.  It\n  // changes as Google Test goes through one test case after another.\n  // When no test is running, this is set to NULL and Google Test\n  // stores assertion results in ad_hoc_test_result_.  Initially NULL.\n  TestCase* current_test_case_;\n\n  // This points to the TestInfo for the currently running test.  It\n  // changes as Google Test goes through one test after another.  When\n  // no test is running, this is set to NULL and Google Test stores\n  // assertion results in ad_hoc_test_result_.  Initially NULL.\n  TestInfo* current_test_info_;\n\n  // Normally, a user only writes assertions inside a TEST or TEST_F,\n  // or inside a function called by a TEST or TEST_F.  
Since Google\n  // Test keeps track of which test is current running, it can\n  // associate such an assertion with the test it belongs to.\n  //\n  // If an assertion is encountered when no TEST or TEST_F is running,\n  // Google Test attributes the assertion result to an imaginary \"ad hoc\"\n  // test, and records the result in ad_hoc_test_result_.\n  TestResult ad_hoc_test_result_;\n\n  // The list of event listeners that can be used to track events inside\n  // Google Test.\n  TestEventListeners listeners_;\n\n  // The OS stack trace getter.  Will be deleted when the UnitTest\n  // object is destructed.  By default, an OsStackTraceGetter is used,\n  // but the user can set this field to use a custom getter if that is\n  // desired.\n  OsStackTraceGetterInterface* os_stack_trace_getter_;\n\n  // True iff PostFlagParsingInit() has been called.\n  bool post_flag_parse_init_performed_;\n\n  // The random number seed used at the beginning of the test run.\n  int random_seed_;\n\n  // Our random number generator.\n  internal::Random random_;\n\n  // The time of the test program start, in ms from the start of the\n  // UNIX epoch.\n  TimeInMillis start_timestamp_;\n\n  // How long the test took to run, in milliseconds.\n  TimeInMillis elapsed_time_;\n\n#if GTEST_HAS_DEATH_TEST\n  // The decomposed components of the gtest_internal_run_death_test flag,\n  // parsed when RUN_ALL_TESTS is called.\n  internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;\n  internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;\n#endif  // GTEST_HAS_DEATH_TEST\n\n  // A per-thread stack of traces created by the SCOPED_TRACE() macro.\n  internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;\n\n  // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()\n  // starts.\n  bool catch_exceptions_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);\n};  // class UnitTestImpl\n\n// Convenience function for accessing the global 
UnitTest\n// implementation object.\ninline UnitTestImpl* GetUnitTestImpl() {\n  return UnitTest::GetInstance()->impl();\n}\n\n#if GTEST_USES_SIMPLE_RE\n\n// Internal helper functions for implementing the simple regular\n// expression matcher.\nGTEST_API_ bool IsInSet(char ch, const char* str);\nGTEST_API_ bool IsAsciiDigit(char ch);\nGTEST_API_ bool IsAsciiPunct(char ch);\nGTEST_API_ bool IsRepeat(char ch);\nGTEST_API_ bool IsAsciiWhiteSpace(char ch);\nGTEST_API_ bool IsAsciiWordChar(char ch);\nGTEST_API_ bool IsValidEscape(char ch);\nGTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);\nGTEST_API_ bool ValidateRegex(const char* regex);\nGTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);\nGTEST_API_ bool MatchRepetitionAndRegexAtHead(\n    bool escaped, char ch, char repeat, const char* regex, const char* str);\nGTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);\n\n#endif  // GTEST_USES_SIMPLE_RE\n\n// Parses the command line for Google Test flags, without initializing\n// other parts of Google Test.\nGTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);\nGTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);\n\n#if GTEST_HAS_DEATH_TEST\n\n// Returns the message describing the last system error, regardless of the\n// platform.\nGTEST_API_ std::string GetLastErrnoDescription();\n\n// Attempts to parse a string into a positive integer pointed to by the\n// number parameter.  
Returns true if that is possible.\n// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use\n// it here.\ntemplate <typename Integer>\nbool ParseNaturalNumber(const ::std::string& str, Integer* number) {\n  // Fail fast if the given string does not begin with a digit;\n  // this bypasses strtoXXX's \"optional leading whitespace and plus\n  // or minus sign\" semantics, which are undesirable here.\n  if (str.empty() || !IsDigit(str[0])) {\n    return false;\n  }\n  errno = 0;\n\n  char* end;\n  // BiggestConvertible is the largest integer type that system-provided\n  // string-to-number conversion routines can return.\n\n# if GTEST_OS_WINDOWS && !defined(__GNUC__)\n\n  // MSVC and C++ Builder define __int64 instead of the standard long long.\n  typedef unsigned __int64 BiggestConvertible;\n  const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);\n\n# else\n\n  typedef unsigned long long BiggestConvertible;  // NOLINT\n  const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);\n\n# endif  // GTEST_OS_WINDOWS && !defined(__GNUC__)\n\n  const bool parse_success = *end == '\\0' && errno == 0;\n\n  // TODO(vladl@google.com): Convert this to compile time assertion when it is\n  // available.\n  GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));\n\n  const Integer result = static_cast<Integer>(parsed);\n  if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {\n    *number = result;\n    return true;\n  }\n  return false;\n}\n#endif  // GTEST_HAS_DEATH_TEST\n\n// TestResult contains some private methods that should be hidden from\n// Google Test user but are required for testing. This class allow our tests\n// to access them.\n//\n// This class is supplied only for the purpose of testing Google Test's own\n// constructs. 
Do not use it in user tests, either directly or indirectly.\nclass TestResultAccessor {\n public:\n  static void RecordProperty(TestResult* test_result,\n                             const std::string& xml_element,\n                             const TestProperty& property) {\n    test_result->RecordProperty(xml_element, property);\n  }\n\n  static void ClearTestPartResults(TestResult* test_result) {\n    test_result->ClearTestPartResults();\n  }\n\n  static const std::vector<testing::TestPartResult>& test_part_results(\n      const TestResult& test_result) {\n    return test_result.test_part_results();\n  }\n};\n\n#if GTEST_CAN_STREAM_RESULTS_\n\n// Streams test results to the given port on the given host machine.\nclass GTEST_API_ StreamingListener : public EmptyTestEventListener {\n public:\n  // Abstract base class for writing strings to a socket.\n  class AbstractSocketWriter {\n   public:\n    virtual ~AbstractSocketWriter() {}\n\n    // Sends a string to the socket.\n    virtual void Send(const string& message) = 0;\n\n    // Closes the socket.\n    virtual void CloseConnection() {}\n\n    // Sends a string and a newline to the socket.\n    void SendLn(const string& message) {\n      Send(message + \"\\n\");\n    }\n  };\n\n  // Concrete class for actually writing strings to a socket.\n  class SocketWriter : public AbstractSocketWriter {\n   public:\n    SocketWriter(const string& host, const string& port)\n        : sockfd_(-1), host_name_(host), port_num_(port) {\n      MakeConnection();\n    }\n\n    virtual ~SocketWriter() {\n      if (sockfd_ != -1)\n        CloseConnection();\n    }\n\n    // Sends a string to the socket.\n    virtual void Send(const string& message) {\n      GTEST_CHECK_(sockfd_ != -1)\n          << \"Send() can be called only when there is a connection.\";\n\n      const int len = static_cast<int>(message.length());\n      if (write(sockfd_, message.c_str(), len) != len) {\n        GTEST_LOG_(WARNING)\n            << 
\"stream_result_to: failed to stream to \"\n            << host_name_ << \":\" << port_num_;\n      }\n    }\n\n   private:\n    // Creates a client socket and connects to the server.\n    void MakeConnection();\n\n    // Closes the socket.\n    void CloseConnection() {\n      GTEST_CHECK_(sockfd_ != -1)\n          << \"CloseConnection() can be called only when there is a connection.\";\n\n      close(sockfd_);\n      sockfd_ = -1;\n    }\n\n    int sockfd_;  // socket file descriptor\n    const string host_name_;\n    const string port_num_;\n\n    GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);\n  };  // class SocketWriter\n\n  // Escapes '=', '&', '%', and '\\n' characters in str as \"%xx\".\n  static string UrlEncode(const char* str);\n\n  StreamingListener(const string& host, const string& port)\n      : socket_writer_(new SocketWriter(host, port)) { Start(); }\n\n  explicit StreamingListener(AbstractSocketWriter* socket_writer)\n      : socket_writer_(socket_writer) { Start(); }\n\n  void OnTestProgramStart(const UnitTest& /* unit_test */) {\n    SendLn(\"event=TestProgramStart\");\n  }\n\n  void OnTestProgramEnd(const UnitTest& unit_test) {\n    // Note that Google Test current only report elapsed time for each\n    // test iteration, not for the entire test program.\n    SendLn(\"event=TestProgramEnd&passed=\" + FormatBool(unit_test.Passed()));\n\n    // Notify the streaming server to stop.\n    socket_writer_->CloseConnection();\n  }\n\n  void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {\n    SendLn(\"event=TestIterationStart&iteration=\" +\n           StreamableToString(iteration));\n  }\n\n  void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {\n    SendLn(\"event=TestIterationEnd&passed=\" +\n           FormatBool(unit_test.Passed()) + \"&elapsed_time=\" +\n           StreamableToString(unit_test.elapsed_time()) + \"ms\");\n  }\n\n  void OnTestCaseStart(const TestCase& test_case) {\n    
SendLn(std::string(\"event=TestCaseStart&name=\") + test_case.name());\n  }\n\n  void OnTestCaseEnd(const TestCase& test_case) {\n    SendLn(\"event=TestCaseEnd&passed=\" + FormatBool(test_case.Passed())\n           + \"&elapsed_time=\" + StreamableToString(test_case.elapsed_time())\n           + \"ms\");\n  }\n\n  void OnTestStart(const TestInfo& test_info) {\n    SendLn(std::string(\"event=TestStart&name=\") + test_info.name());\n  }\n\n  void OnTestEnd(const TestInfo& test_info) {\n    SendLn(\"event=TestEnd&passed=\" +\n           FormatBool((test_info.result())->Passed()) +\n           \"&elapsed_time=\" +\n           StreamableToString((test_info.result())->elapsed_time()) + \"ms\");\n  }\n\n  void OnTestPartResult(const TestPartResult& test_part_result) {\n    const char* file_name = test_part_result.file_name();\n    if (file_name == NULL)\n      file_name = \"\";\n    SendLn(\"event=TestPartResult&file=\" + UrlEncode(file_name) +\n           \"&line=\" + StreamableToString(test_part_result.line_number()) +\n           \"&message=\" + UrlEncode(test_part_result.message()));\n  }\n\n private:\n  // Sends the given message and a newline to the socket.\n  void SendLn(const string& message) { socket_writer_->SendLn(message); }\n\n  // Called at the start of streaming to notify the receiver what\n  // protocol we are using.\n  void Start() { SendLn(\"gtest_streaming_protocol_version=1.0\"); }\n\n  string FormatBool(bool value) { return value ? 
\"1\" : \"0\"; }\n\n  const scoped_ptr<AbstractSocketWriter> socket_writer_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);\n};  // class StreamingListener\n\n#endif  // GTEST_CAN_STREAM_RESULTS_\n\n}  // namespace internal\n}  // namespace testing\n\n#endif  // GTEST_SRC_GTEST_INTERNAL_INL_H_\n#undef GTEST_IMPLEMENTATION_\n\n#if GTEST_OS_WINDOWS\n# define vsnprintf _vsnprintf\n#endif  // GTEST_OS_WINDOWS\n\nnamespace testing {\n\nusing internal::CountIf;\nusing internal::ForEach;\nusing internal::GetElementOr;\nusing internal::Shuffle;\n\n// Constants.\n\n// A test whose test case name or test name matches this filter is\n// disabled and not run.\nstatic const char kDisableTestFilter[] = \"DISABLED_*:*/DISABLED_*\";\n\n// A test case whose name matches this filter is considered a death\n// test case and will be run before test cases whose name doesn't\n// match this filter.\nstatic const char kDeathTestCaseFilter[] = \"*DeathTest:*DeathTest/*\";\n\n// A test filter that matches everything.\nstatic const char kUniversalFilter[] = \"*\";\n\n// The default output file for XML output.\nstatic const char kDefaultOutputFile[] = \"test_detail.xml\";\n\n// The environment variable name for the test shard index.\nstatic const char kTestShardIndex[] = \"GTEST_SHARD_INDEX\";\n// The environment variable name for the total number of test shards.\nstatic const char kTestTotalShards[] = \"GTEST_TOTAL_SHARDS\";\n// The environment variable name for the test shard status file.\nstatic const char kTestShardStatusFile[] = \"GTEST_SHARD_STATUS_FILE\";\n\nnamespace internal {\n\n// The text used in failure messages to indicate the start of the\n// stack trace.\nconst char kStackTraceMarker[] = \"\\nStack trace:\\n\";\n\n// g_help_flag is true iff the --help flag or an equivalent form is\n// specified on the command line.\nbool g_help_flag = false;\n\n}  // namespace internal\n\nstatic const char* GetDefaultFilter() {\n#ifdef GTEST_TEST_FILTER_ENV_VAR_\n  const char* const 
testbridge_test_only = getenv(GTEST_TEST_FILTER_ENV_VAR_);\n  if (testbridge_test_only != NULL) {\n    return testbridge_test_only;\n  }\n#endif  // GTEST_TEST_FILTER_ENV_VAR_\n  return kUniversalFilter;\n}\n\nGTEST_DEFINE_bool_(\n    also_run_disabled_tests,\n    internal::BoolFromGTestEnv(\"also_run_disabled_tests\", false),\n    \"Run disabled tests too, in addition to the tests normally being run.\");\n\nGTEST_DEFINE_bool_(\n    break_on_failure,\n    internal::BoolFromGTestEnv(\"break_on_failure\", false),\n    \"True iff a failed assertion should be a debugger break-point.\");\n\nGTEST_DEFINE_bool_(\n    catch_exceptions,\n    internal::BoolFromGTestEnv(\"catch_exceptions\", true),\n    \"True iff \" GTEST_NAME_\n    \" should catch exceptions and treat them as test failures.\");\n\nGTEST_DEFINE_string_(\n    color,\n    internal::StringFromGTestEnv(\"color\", \"auto\"),\n    \"Whether to use colors in the output.  Valid values: yes, no, \"\n    \"and auto.  'auto' means to use colors if the output is \"\n    \"being sent to a terminal and the TERM environment variable \"\n    \"is set to a terminal type that supports colors.\");\n\nGTEST_DEFINE_string_(\n    filter,\n    internal::StringFromGTestEnv(\"filter\", GetDefaultFilter()),\n    \"A colon-separated list of glob (not regex) patterns \"\n    \"for filtering the tests to run, optionally followed by a \"\n    \"'-' and a : separated list of negative patterns (tests to \"\n    \"exclude).  A test is run if it matches one of the positive \"\n    \"patterns and does not match any of the negative patterns.\");\n\nGTEST_DEFINE_bool_(list_tests, false,\n                   \"List all tests without running them.\");\n\nGTEST_DEFINE_string_(\n    output,\n    internal::StringFromGTestEnv(\"output\", \"\"),\n    \"A format (currently must be \\\"xml\\\"), optionally followed \"\n    \"by a colon and an output file name or directory. A directory \"\n    \"is indicated by a trailing pathname separator. 
\"\n    \"Examples: \\\"xml:filename.xml\\\", \\\"xml::directoryname/\\\". \"\n    \"If a directory is specified, output files will be created \"\n    \"within that directory, with file-names based on the test \"\n    \"executable's name and, if necessary, made unique by adding \"\n    \"digits.\");\n\nGTEST_DEFINE_bool_(\n    print_time,\n    internal::BoolFromGTestEnv(\"print_time\", true),\n    \"True iff \" GTEST_NAME_\n    \" should display elapsed time in text output.\");\n\nGTEST_DEFINE_int32_(\n    random_seed,\n    internal::Int32FromGTestEnv(\"random_seed\", 0),\n    \"Random number seed to use when shuffling test orders.  Must be in range \"\n    \"[1, 99999], or 0 to use a seed based on the current time.\");\n\nGTEST_DEFINE_int32_(\n    repeat,\n    internal::Int32FromGTestEnv(\"repeat\", 1),\n    \"How many times to repeat each test.  Specify a negative number \"\n    \"for repeating forever.  Useful for shaking out flaky tests.\");\n\nGTEST_DEFINE_bool_(\n    show_internal_stack_frames, false,\n    \"True iff \" GTEST_NAME_ \" should include internal stack frames when \"\n    \"printing test failure stack traces.\");\n\nGTEST_DEFINE_bool_(\n    shuffle,\n    internal::BoolFromGTestEnv(\"shuffle\", false),\n    \"True iff \" GTEST_NAME_\n    \" should randomize tests' order on every run.\");\n\nGTEST_DEFINE_int32_(\n    stack_trace_depth,\n    internal::Int32FromGTestEnv(\"stack_trace_depth\", kMaxStackTraceDepth),\n    \"The maximum number of stack frames to print when an \"\n    \"assertion fails.  The valid range is 0 through 100, inclusive.\");\n\nGTEST_DEFINE_string_(\n    stream_result_to,\n    internal::StringFromGTestEnv(\"stream_result_to\", \"\"),\n    \"This flag specifies the host name and the port number on which to stream \"\n    \"test results. Example: \\\"localhost:555\\\". 
The flag is effective only on \"\n    \"Linux.\");\n\nGTEST_DEFINE_bool_(\n    throw_on_failure,\n    internal::BoolFromGTestEnv(\"throw_on_failure\", false),\n    \"When this flag is specified, a failed assertion will throw an exception \"\n    \"if exceptions are enabled or exit the program with a non-zero code \"\n    \"otherwise.\");\n\n#if GTEST_USE_OWN_FLAGFILE_FLAG_\nGTEST_DEFINE_string_(\n    flagfile,\n    internal::StringFromGTestEnv(\"flagfile\", \"\"),\n    \"This flag specifies the flagfile to read command-line flags from.\");\n#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_\n\nnamespace internal {\n\n// Generates a random number from [0, range), using a Linear\n// Congruential Generator (LCG).  Crashes if 'range' is 0 or greater\n// than kMaxRange.\nUInt32 Random::Generate(UInt32 range) {\n  // These constants are the same as are used in glibc's rand(3).\n  state_ = (1103515245U*state_ + 12345U) % kMaxRange;\n\n  GTEST_CHECK_(range > 0)\n      << \"Cannot generate a number in the range [0, 0).\";\n  GTEST_CHECK_(range <= kMaxRange)\n      << \"Generation of a number in [0, \" << range << \") was requested, \"\n      << \"but this can only generate numbers in [0, \" << kMaxRange << \").\";\n\n  // Converting via modulus introduces a bit of downward bias, but\n  // it's simple, and a linear congruential generator isn't too good\n  // to begin with.\n  return state_ % range;\n}\n\n// GTestIsInitialized() returns true iff the user has initialized\n// Google Test.  
Useful for catching the user mistake of not initializing\n// Google Test before calling RUN_ALL_TESTS().\nstatic bool GTestIsInitialized() { return GetArgvs().size() > 0; }\n\n// Iterates over a vector of TestCases, keeping a running sum of the\n// results of calling a given int-returning method on each.\n// Returns the sum.\nstatic int SumOverTestCaseList(const std::vector<TestCase*>& case_list,\n                               int (TestCase::*method)() const) {\n  int sum = 0;\n  for (size_t i = 0; i < case_list.size(); i++) {\n    sum += (case_list[i]->*method)();\n  }\n  return sum;\n}\n\n// Returns true iff the test case passed.\nstatic bool TestCasePassed(const TestCase* test_case) {\n  return test_case->should_run() && test_case->Passed();\n}\n\n// Returns true iff the test case failed.\nstatic bool TestCaseFailed(const TestCase* test_case) {\n  return test_case->should_run() && test_case->Failed();\n}\n\n// Returns true iff test_case contains at least one test that should\n// run.\nstatic bool ShouldRunTestCase(const TestCase* test_case) {\n  return test_case->should_run();\n}\n\n// AssertHelper constructor.\nAssertHelper::AssertHelper(TestPartResult::Type type,\n                           const char* file,\n                           int line,\n                           const char* message)\n    : data_(new AssertHelperData(type, file, line, message)) {\n}\n\nAssertHelper::~AssertHelper() {\n  delete data_;\n}\n\n// Message assignment, for assertion streaming support.\nvoid AssertHelper::operator=(const Message& message) const {\n  UnitTest::GetInstance()->\n    AddTestPartResult(data_->type, data_->file, data_->line,\n                      AppendUserMessage(data_->message, message),\n                      UnitTest::GetInstance()->impl()\n                      ->CurrentOsStackTraceExceptTop(1)\n                      // Skips the stack frame for this function itself.\n                      );  // NOLINT\n}\n\n// Mutex for linked pointers.\nGTEST_API_ 
GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);\n\n// A copy of all command line arguments.  Set by InitGoogleTest().\n::std::vector<testing::internal::string> g_argvs;\n\nconst ::std::vector<testing::internal::string>& GetArgvs() {\n#if defined(GTEST_CUSTOM_GET_ARGVS_)\n  return GTEST_CUSTOM_GET_ARGVS_();\n#else  // defined(GTEST_CUSTOM_GET_ARGVS_)\n  return g_argvs;\n#endif  // defined(GTEST_CUSTOM_GET_ARGVS_)\n}\n\n// Returns the current application's name, removing directory path if that\n// is present.\nFilePath GetCurrentExecutableName() {\n  FilePath result;\n\n#if GTEST_OS_WINDOWS\n  result.Set(FilePath(GetArgvs()[0]).RemoveExtension(\"exe\"));\n#else\n  result.Set(FilePath(GetArgvs()[0]));\n#endif  // GTEST_OS_WINDOWS\n\n  return result.RemoveDirectoryName();\n}\n\n// Functions for processing the gtest_output flag.\n\n// Returns the output format, or \"\" for normal printed output.\nstd::string UnitTestOptions::GetOutputFormat() {\n  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();\n  if (gtest_output_flag == NULL) return std::string(\"\");\n\n  const char* const colon = strchr(gtest_output_flag, ':');\n  return (colon == NULL) ?\n      std::string(gtest_output_flag) :\n      std::string(gtest_output_flag, colon - gtest_output_flag);\n}\n\n// Returns the name of the requested output file, or the default if none\n// was explicitly specified.\nstd::string UnitTestOptions::GetAbsolutePathToOutputFile() {\n  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();\n  if (gtest_output_flag == NULL)\n    return \"\";\n\n  const char* const colon = strchr(gtest_output_flag, ':');\n  if (colon == NULL)\n    return internal::FilePath::ConcatPaths(\n        internal::FilePath(\n            UnitTest::GetInstance()->original_working_dir()),\n        internal::FilePath(kDefaultOutputFile)).string();\n\n  internal::FilePath output_name(colon + 1);\n  if (!output_name.IsAbsolutePath())\n    // TODO(wan@google.com): on Windows \\some\\path is not 
an absolute\n    // path (as its meaning depends on the current drive), yet the\n    // following logic for turning it into an absolute path is wrong.\n    // Fix it.\n    output_name = internal::FilePath::ConcatPaths(\n        internal::FilePath(UnitTest::GetInstance()->original_working_dir()),\n        internal::FilePath(colon + 1));\n\n  if (!output_name.IsDirectory())\n    return output_name.string();\n\n  internal::FilePath result(internal::FilePath::GenerateUniqueFileName(\n      output_name, internal::GetCurrentExecutableName(),\n      GetOutputFormat().c_str()));\n  return result.string();\n}\n\n// Returns true iff the wildcard pattern matches the string.  The\n// first ':' or '\\0' character in pattern marks the end of it.\n//\n// This recursive algorithm isn't very efficient, but is clear and\n// works well enough for matching test names, which are short.\nbool UnitTestOptions::PatternMatchesString(const char *pattern,\n                                           const char *str) {\n  switch (*pattern) {\n    case '\\0':\n    case ':':  // Either ':' or '\\0' marks the end of the pattern.\n      return *str == '\\0';\n    case '?':  // Matches any single character.\n      return *str != '\\0' && PatternMatchesString(pattern + 1, str + 1);\n    case '*':  // Matches any string (possibly empty) of characters.\n      return (*str != '\\0' && PatternMatchesString(pattern, str + 1)) ||\n          PatternMatchesString(pattern + 1, str);\n    default:  // Non-special character.  
Matches itself.\n      return *pattern == *str &&\n          PatternMatchesString(pattern + 1, str + 1);\n  }\n}\n\nbool UnitTestOptions::MatchesFilter(\n    const std::string& name, const char* filter) {\n  const char *cur_pattern = filter;\n  for (;;) {\n    if (PatternMatchesString(cur_pattern, name.c_str())) {\n      return true;\n    }\n\n    // Finds the next pattern in the filter.\n    cur_pattern = strchr(cur_pattern, ':');\n\n    // Returns if no more pattern can be found.\n    if (cur_pattern == NULL) {\n      return false;\n    }\n\n    // Skips the pattern separater (the ':' character).\n    cur_pattern++;\n  }\n}\n\n// Returns true iff the user-specified filter matches the test case\n// name and the test name.\nbool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name,\n                                        const std::string &test_name) {\n  const std::string& full_name = test_case_name + \".\" + test_name.c_str();\n\n  // Split --gtest_filter at '-', if there is one, to separate into\n  // positive filter and negative filter portions\n  const char* const p = GTEST_FLAG(filter).c_str();\n  const char* const dash = strchr(p, '-');\n  std::string positive;\n  std::string negative;\n  if (dash == NULL) {\n    positive = GTEST_FLAG(filter).c_str();  // Whole string is a positive filter\n    negative = \"\";\n  } else {\n    positive = std::string(p, dash);   // Everything up to the dash\n    negative = std::string(dash + 1);  // Everything after the dash\n    if (positive.empty()) {\n      // Treat '-test1' as the same as '*-test1'\n      positive = kUniversalFilter;\n    }\n  }\n\n  // A filter is a colon-separated list of patterns.  
It matches a\n  // test if any pattern in it matches the test.\n  return (MatchesFilter(full_name, positive.c_str()) &&\n          !MatchesFilter(full_name, negative.c_str()));\n}\n\n#if GTEST_HAS_SEH\n// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the\n// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.\n// This function is useful as an __except condition.\nint UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {\n  // Google Test should handle a SEH exception if:\n  //   1. the user wants it to, AND\n  //   2. this is not a breakpoint exception, AND\n  //   3. this is not a C++ exception (VC++ implements them via SEH,\n  //      apparently).\n  //\n  // SEH exception code for C++ exceptions.\n  // (see http://support.microsoft.com/kb/185294 for more information).\n  const DWORD kCxxExceptionCode = 0xe06d7363;\n\n  bool should_handle = true;\n\n  if (!GTEST_FLAG(catch_exceptions))\n    should_handle = false;\n  else if (exception_code == EXCEPTION_BREAKPOINT)\n    should_handle = false;\n  else if (exception_code == kCxxExceptionCode)\n    should_handle = false;\n\n  return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;\n}\n#endif  // GTEST_HAS_SEH\n\n}  // namespace internal\n\n// The c'tor sets this object as the test part result reporter used by\n// Google Test.  The 'result' parameter specifies where to report the\n// results. Intercepts only failures from the current thread.\nScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(\n    TestPartResultArray* result)\n    : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),\n      result_(result) {\n  Init();\n}\n\n// The c'tor sets this object as the test part result reporter used by\n// Google Test.  
The 'result' parameter specifies where to report the\n// results.\nScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(\n    InterceptMode intercept_mode, TestPartResultArray* result)\n    : intercept_mode_(intercept_mode),\n      result_(result) {\n  Init();\n}\n\nvoid ScopedFakeTestPartResultReporter::Init() {\n  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();\n  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {\n    old_reporter_ = impl->GetGlobalTestPartResultReporter();\n    impl->SetGlobalTestPartResultReporter(this);\n  } else {\n    old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();\n    impl->SetTestPartResultReporterForCurrentThread(this);\n  }\n}\n\n// The d'tor restores the test part result reporter used by Google Test\n// before.\nScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {\n  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();\n  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {\n    impl->SetGlobalTestPartResultReporter(old_reporter_);\n  } else {\n    impl->SetTestPartResultReporterForCurrentThread(old_reporter_);\n  }\n}\n\n// Increments the test part result count and remembers the result.\n// This method is from the TestPartResultReporterInterface interface.\nvoid ScopedFakeTestPartResultReporter::ReportTestPartResult(\n    const TestPartResult& result) {\n  result_->Append(result);\n}\n\nnamespace internal {\n\n// Returns the type ID of ::testing::Test.  We should always call this\n// instead of GetTypeId< ::testing::Test>() to get the type ID of\n// testing::Test.  This is to work around a suspected linker bug when\n// using Google Test as a framework on Mac OS X.  The bug causes\n// GetTypeId< ::testing::Test>() to return different values depending\n// on whether the call is from the Google Test framework itself or\n// from user test code.  
GetTestTypeId() is guaranteed to always\n// return the same value, as it always calls GetTypeId<>() from the\n// gtest.cc, which is within the Google Test framework.\nTypeId GetTestTypeId() {\n  return GetTypeId<Test>();\n}\n\n// The value of GetTestTypeId() as seen from within the Google Test\n// library.  This is solely for testing GetTestTypeId().\nextern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();\n\n// This predicate-formatter checks that 'results' contains a test part\n// failure of the given type and that the failure message contains the\n// given substring.\nAssertionResult HasOneFailure(const char* /* results_expr */,\n                              const char* /* type_expr */,\n                              const char* /* substr_expr */,\n                              const TestPartResultArray& results,\n                              TestPartResult::Type type,\n                              const string& substr) {\n  const std::string expected(type == TestPartResult::kFatalFailure ?\n                        \"1 fatal failure\" :\n                        \"1 non-fatal failure\");\n  Message msg;\n  if (results.size() != 1) {\n    msg << \"Expected: \" << expected << \"\\n\"\n        << \"  Actual: \" << results.size() << \" failures\";\n    for (int i = 0; i < results.size(); i++) {\n      msg << \"\\n\" << results.GetTestPartResult(i);\n    }\n    return AssertionFailure() << msg;\n  }\n\n  const TestPartResult& r = results.GetTestPartResult(0);\n  if (r.type() != type) {\n    return AssertionFailure() << \"Expected: \" << expected << \"\\n\"\n                              << \"  Actual:\\n\"\n                              << r;\n  }\n\n  if (strstr(r.message(), substr.c_str()) == NULL) {\n    return AssertionFailure() << \"Expected: \" << expected << \" containing \\\"\"\n                              << substr << \"\\\"\\n\"\n                              << \"  Actual:\\n\"\n                              << r;\n  }\n\n  return 
AssertionSuccess();\n}\n\n// The constructor of SingleFailureChecker remembers where to look up\n// test part results, what type of failure we expect, and what\n// substring the failure message should contain.\nSingleFailureChecker:: SingleFailureChecker(\n    const TestPartResultArray* results,\n    TestPartResult::Type type,\n    const string& substr)\n    : results_(results),\n      type_(type),\n      substr_(substr) {}\n\n// The destructor of SingleFailureChecker verifies that the given\n// TestPartResultArray contains exactly one failure that has the given\n// type and contains the given substring.  If that's not the case, a\n// non-fatal failure will be generated.\nSingleFailureChecker::~SingleFailureChecker() {\n  EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);\n}\n\nDefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(\n    UnitTestImpl* unit_test) : unit_test_(unit_test) {}\n\nvoid DefaultGlobalTestPartResultReporter::ReportTestPartResult(\n    const TestPartResult& result) {\n  unit_test_->current_test_result()->AddTestPartResult(result);\n  unit_test_->listeners()->repeater()->OnTestPartResult(result);\n}\n\nDefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(\n    UnitTestImpl* unit_test) : unit_test_(unit_test) {}\n\nvoid DefaultPerThreadTestPartResultReporter::ReportTestPartResult(\n    const TestPartResult& result) {\n  unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);\n}\n\n// Returns the global test part result reporter.\nTestPartResultReporterInterface*\nUnitTestImpl::GetGlobalTestPartResultReporter() {\n  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);\n  return global_test_part_result_repoter_;\n}\n\n// Sets the global test part result reporter.\nvoid UnitTestImpl::SetGlobalTestPartResultReporter(\n    TestPartResultReporterInterface* reporter) {\n  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);\n  
global_test_part_result_repoter_ = reporter;\n}\n\n// Returns the test part result reporter for the current thread.\nTestPartResultReporterInterface*\nUnitTestImpl::GetTestPartResultReporterForCurrentThread() {\n  return per_thread_test_part_result_reporter_.get();\n}\n\n// Sets the test part result reporter for the current thread.\nvoid UnitTestImpl::SetTestPartResultReporterForCurrentThread(\n    TestPartResultReporterInterface* reporter) {\n  per_thread_test_part_result_reporter_.set(reporter);\n}\n\n// Gets the number of successful test cases.\nint UnitTestImpl::successful_test_case_count() const {\n  return CountIf(test_cases_, TestCasePassed);\n}\n\n// Gets the number of failed test cases.\nint UnitTestImpl::failed_test_case_count() const {\n  return CountIf(test_cases_, TestCaseFailed);\n}\n\n// Gets the number of all test cases.\nint UnitTestImpl::total_test_case_count() const {\n  return static_cast<int>(test_cases_.size());\n}\n\n// Gets the number of all test cases that contain at least one test\n// that should run.\nint UnitTestImpl::test_case_to_run_count() const {\n  return CountIf(test_cases_, ShouldRunTestCase);\n}\n\n// Gets the number of successful tests.\nint UnitTestImpl::successful_test_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);\n}\n\n// Gets the number of failed tests.\nint UnitTestImpl::failed_test_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);\n}\n\n// Gets the number of disabled tests that will be reported in the XML report.\nint UnitTestImpl::reportable_disabled_test_count() const {\n  return SumOverTestCaseList(test_cases_,\n                             &TestCase::reportable_disabled_test_count);\n}\n\n// Gets the number of disabled tests.\nint UnitTestImpl::disabled_test_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);\n}\n\n// Gets the number of tests to be printed in the XML report.\nint 
UnitTestImpl::reportable_test_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count);\n}\n\n// Gets the number of all tests.\nint UnitTestImpl::total_test_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);\n}\n\n// Gets the number of tests that should run.\nint UnitTestImpl::test_to_run_count() const {\n  return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);\n}\n\n// Returns the current OS stack trace as an std::string.\n//\n// The maximum number of stack frames to be included is specified by\n// the gtest_stack_trace_depth flag.  The skip_count parameter\n// specifies the number of top frames to be skipped, which doesn't\n// count against the number of frames to be included.\n//\n// For example, if Foo() calls Bar(), which in turn calls\n// CurrentOsStackTraceExceptTop(1), Foo() will be included in the\n// trace but Bar() and CurrentOsStackTraceExceptTop() won't.\nstd::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {\n  return os_stack_trace_getter()->CurrentStackTrace(\n      static_cast<int>(GTEST_FLAG(stack_trace_depth)),\n      skip_count + 1\n      // Skips the user-specified number of frames plus this function\n      // itself.\n      );  // NOLINT\n}\n\n// Returns the current time in milliseconds.\nTimeInMillis GetTimeInMillis() {\n#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)\n  // Difference between 1970-01-01 and 1601-01-01 in milliseconds.\n  // http://analogous.blogspot.com/2005/04/epoch.html\n  const TimeInMillis kJavaEpochToWinFileTimeDelta =\n    static_cast<TimeInMillis>(116444736UL) * 100000UL;\n  const DWORD kTenthMicrosInMilliSecond = 10000;\n\n  SYSTEMTIME now_systime;\n  FILETIME now_filetime;\n  ULARGE_INTEGER now_int64;\n  // TODO(kenton@google.com): Shouldn't this just use\n  //   GetSystemTimeAsFileTime()?\n  GetSystemTime(&now_systime);\n  if (SystemTimeToFileTime(&now_systime, &now_filetime)) {\n    now_int64.LowPart = 
now_filetime.dwLowDateTime;\n    now_int64.HighPart = now_filetime.dwHighDateTime;\n    now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -\n      kJavaEpochToWinFileTimeDelta;\n    return now_int64.QuadPart;\n  }\n  return 0;\n#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_\n  __timeb64 now;\n\n  // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996\n  // (deprecated function) there.\n  // TODO(kenton@google.com): Use GetTickCount()?  Or use\n  //   SystemTimeToFileTime()\n  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)\n  _ftime64(&now);\n  GTEST_DISABLE_MSC_WARNINGS_POP_()\n\n  return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;\n#elif GTEST_HAS_GETTIMEOFDAY_\n  struct timeval now;\n  gettimeofday(&now, NULL);\n  return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;\n#else\n# error \"Don't know how to get the current time on your system.\"\n#endif\n}\n\n// Utilities\n\n// class String.\n\n#if GTEST_OS_WINDOWS_MOBILE\n// Creates a UTF-16 wide string from the given ANSI string, allocating\n// memory using new. The caller is responsible for deleting the return\n// value using delete[]. Returns the wide string, or NULL if the\n// input is NULL.\nLPCWSTR String::AnsiToUtf16(const char* ansi) {\n  if (!ansi) return NULL;\n  const int length = strlen(ansi);\n  const int unicode_length =\n      MultiByteToWideChar(CP_ACP, 0, ansi, length,\n                          NULL, 0);\n  WCHAR* unicode = new WCHAR[unicode_length + 1];\n  MultiByteToWideChar(CP_ACP, 0, ansi, length,\n                      unicode, unicode_length);\n  unicode[unicode_length] = 0;\n  return unicode;\n}\n\n// Creates an ANSI string from the given wide string, allocating\n// memory using new. The caller is responsible for deleting the return\n// value using delete[]. 
Returns the ANSI string, or NULL if the\n// input is NULL.\nconst char* String::Utf16ToAnsi(LPCWSTR utf16_str)  {\n  if (!utf16_str) return NULL;\n  const int ansi_length =\n      WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,\n                          NULL, 0, NULL, NULL);\n  char* ansi = new char[ansi_length + 1];\n  WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,\n                      ansi, ansi_length, NULL, NULL);\n  ansi[ansi_length] = 0;\n  return ansi;\n}\n\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n// Compares two C strings.  Returns true iff they have the same content.\n//\n// Unlike strcmp(), this function can handle NULL argument(s).  A NULL\n// C string is considered different to any non-NULL C string,\n// including the empty string.\nbool String::CStringEquals(const char * lhs, const char * rhs) {\n  if ( lhs == NULL ) return rhs == NULL;\n\n  if ( rhs == NULL ) return false;\n\n  return strcmp(lhs, rhs) == 0;\n}\n\n#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING\n\n// Converts an array of wide chars to a narrow string using the UTF-8\n// encoding, and streams the result to the given Message object.\nstatic void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,\n                                     Message* msg) {\n  for (size_t i = 0; i != length; ) {  // NOLINT\n    if (wstr[i] != L'\\0') {\n      *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));\n      while (i != length && wstr[i] != L'\\0')\n        i++;\n    } else {\n      *msg << '\\0';\n      i++;\n    }\n  }\n}\n\n#endif  // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING\n\nvoid SplitString(const ::std::string& str, char delimiter,\n                 ::std::vector< ::std::string>* dest) {\n  ::std::vector< ::std::string> parsed;\n  ::std::string::size_type pos = 0;\n  while (::testing::internal::AlwaysTrue()) {\n    const ::std::string::size_type colon = str.find(delimiter, pos);\n    if (colon == ::std::string::npos) {\n      
parsed.push_back(str.substr(pos));\n      break;\n    } else {\n      parsed.push_back(str.substr(pos, colon - pos));\n      pos = colon + 1;\n    }\n  }\n  dest->swap(parsed);\n}\n\n}  // namespace internal\n\n// Constructs an empty Message.\n// We allocate the stringstream separately because otherwise each use of\n// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's\n// stack frame leading to huge stack frames in some cases; gcc does not reuse\n// the stack space.\nMessage::Message() : ss_(new ::std::stringstream) {\n  // By default, we want there to be enough precision when printing\n  // a double to a Message.\n  *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);\n}\n\n// These two overloads allow streaming a wide C string to a Message\n// using the UTF-8 encoding.\nMessage& Message::operator <<(const wchar_t* wide_c_str) {\n  return *this << internal::String::ShowWideCString(wide_c_str);\n}\nMessage& Message::operator <<(wchar_t* wide_c_str) {\n  return *this << internal::String::ShowWideCString(wide_c_str);\n}\n\n#if GTEST_HAS_STD_WSTRING\n// Converts the given wide string to a narrow string using the UTF-8\n// encoding, and streams the result to this Message object.\nMessage& Message::operator <<(const ::std::wstring& wstr) {\n  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);\n  return *this;\n}\n#endif  // GTEST_HAS_STD_WSTRING\n\n#if GTEST_HAS_GLOBAL_WSTRING\n// Converts the given wide string to a narrow string using the UTF-8\n// encoding, and streams the result to this Message object.\nMessage& Message::operator <<(const ::wstring& wstr) {\n  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);\n  return *this;\n}\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n// Gets the text streamed to this object so far as an std::string.\n// Each '\\0' character in the buffer is replaced with \"\\\\0\".\nstd::string Message::GetString() const {\n  return 
internal::StringStreamToString(ss_.get());\n}\n\n// AssertionResult constructors.\n// Used in EXPECT_TRUE/FALSE(assertion_result).\nAssertionResult::AssertionResult(const AssertionResult& other)\n    : success_(other.success_),\n      message_(other.message_.get() != NULL ?\n               new ::std::string(*other.message_) :\n               static_cast< ::std::string*>(NULL)) {\n}\n\n// Swaps two AssertionResults.\nvoid AssertionResult::swap(AssertionResult& other) {\n  using std::swap;\n  swap(success_, other.success_);\n  swap(message_, other.message_);\n}\n\n// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.\nAssertionResult AssertionResult::operator!() const {\n  AssertionResult negation(!success_);\n  if (message_.get() != NULL)\n    negation << *message_;\n  return negation;\n}\n\n// Makes a successful assertion result.\nAssertionResult AssertionSuccess() {\n  return AssertionResult(true);\n}\n\n// Makes a failed assertion result.\nAssertionResult AssertionFailure() {\n  return AssertionResult(false);\n}\n\n// Makes a failed assertion result with the given failure message.\n// Deprecated; use AssertionFailure() << message.\nAssertionResult AssertionFailure(const Message& message) {\n  return AssertionFailure() << message;\n}\n\nnamespace internal {\n\nnamespace edit_distance {\nstd::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left,\n                                            const std::vector<size_t>& right) {\n  std::vector<std::vector<double> > costs(\n      left.size() + 1, std::vector<double>(right.size() + 1));\n  std::vector<std::vector<EditType> > best_move(\n      left.size() + 1, std::vector<EditType>(right.size() + 1));\n\n  // Populate for empty right.\n  for (size_t l_i = 0; l_i < costs.size(); ++l_i) {\n    costs[l_i][0] = static_cast<double>(l_i);\n    best_move[l_i][0] = kRemove;\n  }\n  // Populate for empty left.\n  for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) {\n    costs[0][r_i] = 
static_cast<double>(r_i);\n    best_move[0][r_i] = kAdd;\n  }\n\n  for (size_t l_i = 0; l_i < left.size(); ++l_i) {\n    for (size_t r_i = 0; r_i < right.size(); ++r_i) {\n      if (left[l_i] == right[r_i]) {\n        // Found a match. Consume it.\n        costs[l_i + 1][r_i + 1] = costs[l_i][r_i];\n        best_move[l_i + 1][r_i + 1] = kMatch;\n        continue;\n      }\n\n      const double add = costs[l_i + 1][r_i];\n      const double remove = costs[l_i][r_i + 1];\n      const double replace = costs[l_i][r_i];\n      if (add < remove && add < replace) {\n        costs[l_i + 1][r_i + 1] = add + 1;\n        best_move[l_i + 1][r_i + 1] = kAdd;\n      } else if (remove < add && remove < replace) {\n        costs[l_i + 1][r_i + 1] = remove + 1;\n        best_move[l_i + 1][r_i + 1] = kRemove;\n      } else {\n        // We make replace a little more expensive than add/remove to lower\n        // their priority.\n        costs[l_i + 1][r_i + 1] = replace + 1.00001;\n        best_move[l_i + 1][r_i + 1] = kReplace;\n      }\n    }\n  }\n\n  // Reconstruct the best path. 
We do it in reverse order.\n  std::vector<EditType> best_path;\n  for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) {\n    EditType move = best_move[l_i][r_i];\n    best_path.push_back(move);\n    l_i -= move != kAdd;\n    r_i -= move != kRemove;\n  }\n  std::reverse(best_path.begin(), best_path.end());\n  return best_path;\n}\n\nnamespace {\n\n// Helper class to convert string into ids with deduplication.\nclass InternalStrings {\n public:\n  size_t GetId(const std::string& str) {\n    IdMap::iterator it = ids_.find(str);\n    if (it != ids_.end()) return it->second;\n    size_t id = ids_.size();\n    return ids_[str] = id;\n  }\n\n private:\n  typedef std::map<std::string, size_t> IdMap;\n  IdMap ids_;\n};\n\n}  // namespace\n\nstd::vector<EditType> CalculateOptimalEdits(\n    const std::vector<std::string>& left,\n    const std::vector<std::string>& right) {\n  std::vector<size_t> left_ids, right_ids;\n  {\n    InternalStrings intern_table;\n    for (size_t i = 0; i < left.size(); ++i) {\n      left_ids.push_back(intern_table.GetId(left[i]));\n    }\n    for (size_t i = 0; i < right.size(); ++i) {\n      right_ids.push_back(intern_table.GetId(right[i]));\n    }\n  }\n  return CalculateOptimalEdits(left_ids, right_ids);\n}\n\nnamespace {\n\n// Helper class that holds the state for one hunk and prints it out to the\n// stream.\n// It reorders adds/removes when possible to group all removes before all\n// adds. 
It also adds the hunk header before printint into the stream.\nclass Hunk {\n public:\n  Hunk(size_t left_start, size_t right_start)\n      : left_start_(left_start),\n        right_start_(right_start),\n        adds_(),\n        removes_(),\n        common_() {}\n\n  void PushLine(char edit, const char* line) {\n    switch (edit) {\n      case ' ':\n        ++common_;\n        FlushEdits();\n        hunk_.push_back(std::make_pair(' ', line));\n        break;\n      case '-':\n        ++removes_;\n        hunk_removes_.push_back(std::make_pair('-', line));\n        break;\n      case '+':\n        ++adds_;\n        hunk_adds_.push_back(std::make_pair('+', line));\n        break;\n    }\n  }\n\n  void PrintTo(std::ostream* os) {\n    PrintHeader(os);\n    FlushEdits();\n    for (std::list<std::pair<char, const char*> >::const_iterator it =\n             hunk_.begin();\n         it != hunk_.end(); ++it) {\n      *os << it->first << it->second << \"\\n\";\n    }\n  }\n\n  bool has_edits() const { return adds_ || removes_; }\n\n private:\n  void FlushEdits() {\n    hunk_.splice(hunk_.end(), hunk_removes_);\n    hunk_.splice(hunk_.end(), hunk_adds_);\n  }\n\n  // Print a unified diff header for one hunk.\n  // The format is\n  //   \"@@ -<left_start>,<left_length> +<right_start>,<right_length> @@\"\n  // where the left/right parts are ommitted if unnecessary.\n  void PrintHeader(std::ostream* ss) const {\n    *ss << \"@@ \";\n    if (removes_) {\n      *ss << \"-\" << left_start_ << \",\" << (removes_ + common_);\n    }\n    if (removes_ && adds_) {\n      *ss << \" \";\n    }\n    if (adds_) {\n      *ss << \"+\" << right_start_ << \",\" << (adds_ + common_);\n    }\n    *ss << \" @@\\n\";\n  }\n\n  size_t left_start_, right_start_;\n  size_t adds_, removes_, common_;\n  std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_;\n};\n\n}  // namespace\n\n// Create a list of diff hunks in Unified diff format.\n// Each hunk has a header generated by 
PrintHeader above plus a body with\n// lines prefixed with ' ' for no change, '-' for deletion and '+' for\n// addition.\n// 'context' represents the desired unchanged prefix/suffix around the diff.\n// If two hunks are close enough that their contexts overlap, then they are\n// joined into one hunk.\nstd::string CreateUnifiedDiff(const std::vector<std::string>& left,\n                              const std::vector<std::string>& right,\n                              size_t context) {\n  const std::vector<EditType> edits = CalculateOptimalEdits(left, right);\n\n  size_t l_i = 0, r_i = 0, edit_i = 0;\n  std::stringstream ss;\n  while (edit_i < edits.size()) {\n    // Find first edit.\n    while (edit_i < edits.size() && edits[edit_i] == kMatch) {\n      ++l_i;\n      ++r_i;\n      ++edit_i;\n    }\n\n    // Find the first line to include in the hunk.\n    const size_t prefix_context = std::min(l_i, context);\n    Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1);\n    for (size_t i = prefix_context; i > 0; --i) {\n      hunk.PushLine(' ', left[l_i - i].c_str());\n    }\n\n    // Iterate the edits until we found enough suffix for the hunk or the input\n    // is over.\n    size_t n_suffix = 0;\n    for (; edit_i < edits.size(); ++edit_i) {\n      if (n_suffix >= context) {\n        // Continue only if the next hunk is very close.\n        std::vector<EditType>::const_iterator it = edits.begin() + edit_i;\n        while (it != edits.end() && *it == kMatch) ++it;\n        if (it == edits.end() || (it - edits.begin()) - edit_i >= context) {\n          // There is no next edit or it is too far away.\n          break;\n        }\n      }\n\n      EditType edit = edits[edit_i];\n      // Reset count when a non match is found.\n      n_suffix = edit == kMatch ? n_suffix + 1 : 0;\n\n      if (edit == kMatch || edit == kRemove || edit == kReplace) {\n        hunk.PushLine(edit == kMatch ? 
' ' : '-', left[l_i].c_str());\n      }\n      if (edit == kAdd || edit == kReplace) {\n        hunk.PushLine('+', right[r_i].c_str());\n      }\n\n      // Advance indices, depending on edit type.\n      l_i += edit != kAdd;\n      r_i += edit != kRemove;\n    }\n\n    if (!hunk.has_edits()) {\n      // We are done. We don't want this hunk.\n      break;\n    }\n\n    hunk.PrintTo(&ss);\n  }\n  return ss.str();\n}\n\n}  // namespace edit_distance\n\nnamespace {\n\n// The string representation of the values received in EqFailure() are already\n// escaped. Split them on escaped '\\n' boundaries. Leave all other escaped\n// characters the same.\nstd::vector<std::string> SplitEscapedString(const std::string& str) {\n  std::vector<std::string> lines;\n  size_t start = 0, end = str.size();\n  if (end > 2 && str[0] == '\"' && str[end - 1] == '\"') {\n    ++start;\n    --end;\n  }\n  bool escaped = false;\n  for (size_t i = start; i + 1 < end; ++i) {\n    if (escaped) {\n      escaped = false;\n      if (str[i] == 'n') {\n        lines.push_back(str.substr(start, i - start - 1));\n        start = i + 1;\n      }\n    } else {\n      escaped = str[i] == '\\\\';\n    }\n  }\n  lines.push_back(str.substr(start, end - start));\n  return lines;\n}\n\n}  // namespace\n\n// Constructs and returns the message for an equality assertion\n// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.\n//\n// The first four parameters are the expressions used in the assertion\n// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)\n// where foo is 5 and bar is 6, we have:\n//\n//   expected_expression: \"foo\"\n//   actual_expression:   \"bar\"\n//   expected_value:      \"5\"\n//   actual_value:        \"6\"\n//\n// The ignoring_case parameter is true iff the assertion is a\n// *_STRCASEEQ*.  
When it's true, the string \" (ignoring case)\" will\n// be inserted into the message.\nAssertionResult EqFailure(const char* expected_expression,\n                          const char* actual_expression,\n                          const std::string& expected_value,\n                          const std::string& actual_value,\n                          bool ignoring_case) {\n  Message msg;\n  msg << \"Value of: \" << actual_expression;\n  if (actual_value != actual_expression) {\n    msg << \"\\n  Actual: \" << actual_value;\n  }\n\n  msg << \"\\nExpected: \" << expected_expression;\n  if (ignoring_case) {\n    msg << \" (ignoring case)\";\n  }\n  if (expected_value != expected_expression) {\n    msg << \"\\nWhich is: \" << expected_value;\n  }\n\n  if (!expected_value.empty() && !actual_value.empty()) {\n    const std::vector<std::string> expected_lines =\n        SplitEscapedString(expected_value);\n    const std::vector<std::string> actual_lines =\n        SplitEscapedString(actual_value);\n    if (expected_lines.size() > 1 || actual_lines.size() > 1) {\n      msg << \"\\nWith diff:\\n\"\n          << edit_distance::CreateUnifiedDiff(expected_lines, actual_lines);\n    }\n  }\n\n  return AssertionFailure() << msg;\n}\n\n// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.\nstd::string GetBoolAssertionFailureMessage(\n    const AssertionResult& assertion_result,\n    const char* expression_text,\n    const char* actual_predicate_value,\n    const char* expected_predicate_value) {\n  const char* actual_message = assertion_result.message();\n  Message msg;\n  msg << \"Value of: \" << expression_text\n      << \"\\n  Actual: \" << actual_predicate_value;\n  if (actual_message[0] != '\\0')\n    msg << \" (\" << actual_message << \")\";\n  msg << \"\\nExpected: \" << expected_predicate_value;\n  return msg.GetString();\n}\n\n// Helper function for implementing ASSERT_NEAR.\nAssertionResult DoubleNearPredFormat(const char* expr1,\n                 
                    const char* expr2,\n                                     const char* abs_error_expr,\n                                     double val1,\n                                     double val2,\n                                     double abs_error) {\n  const double diff = fabs(val1 - val2);\n  if (diff <= abs_error) return AssertionSuccess();\n\n  // TODO(wan): do not print the value of an expression if it's\n  // already a literal.\n  return AssertionFailure()\n      << \"The difference between \" << expr1 << \" and \" << expr2\n      << \" is \" << diff << \", which exceeds \" << abs_error_expr << \", where\\n\"\n      << expr1 << \" evaluates to \" << val1 << \",\\n\"\n      << expr2 << \" evaluates to \" << val2 << \", and\\n\"\n      << abs_error_expr << \" evaluates to \" << abs_error << \".\";\n}\n\n\n// Helper template for implementing FloatLE() and DoubleLE().\ntemplate <typename RawType>\nAssertionResult FloatingPointLE(const char* expr1,\n                                const char* expr2,\n                                RawType val1,\n                                RawType val2) {\n  // Returns success if val1 is less than val2,\n  if (val1 < val2) {\n    return AssertionSuccess();\n  }\n\n  // or if val1 is almost equal to val2.\n  const FloatingPoint<RawType> lhs(val1), rhs(val2);\n  if (lhs.AlmostEquals(rhs)) {\n    return AssertionSuccess();\n  }\n\n  // Note that the above two checks will both fail if either val1 or\n  // val2 is NaN, as the IEEE floating-point standard requires that\n  // any predicate involving a NaN must return false.\n\n  ::std::stringstream val1_ss;\n  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)\n          << val1;\n\n  ::std::stringstream val2_ss;\n  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)\n          << val2;\n\n  return AssertionFailure()\n      << \"Expected: (\" << expr1 << \") <= (\" << expr2 << \")\\n\"\n      << \"  Actual: \" << 
StringStreamToString(&val1_ss) << \" vs \"\n      << StringStreamToString(&val2_ss);\n}\n\n}  // namespace internal\n\n// Asserts that val1 is less than, or almost equal to, val2.  Fails\n// otherwise.  In particular, it fails if either val1 or val2 is NaN.\nAssertionResult FloatLE(const char* expr1, const char* expr2,\n                        float val1, float val2) {\n  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);\n}\n\n// Asserts that val1 is less than, or almost equal to, val2.  Fails\n// otherwise.  In particular, it fails if either val1 or val2 is NaN.\nAssertionResult DoubleLE(const char* expr1, const char* expr2,\n                         double val1, double val2) {\n  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);\n}\n\nnamespace internal {\n\n// The helper function for {ASSERT|EXPECT}_EQ with int or enum\n// arguments.\nAssertionResult CmpHelperEQ(const char* expected_expression,\n                            const char* actual_expression,\n                            BiggestInt expected,\n                            BiggestInt actual) {\n  if (expected == actual) {\n    return AssertionSuccess();\n  }\n\n  return EqFailure(expected_expression,\n                   actual_expression,\n                   FormatForComparisonFailureMessage(expected, actual),\n                   FormatForComparisonFailureMessage(actual, expected),\n                   false);\n}\n\n// A macro for implementing the helper functions needed to implement\n// ASSERT_?? and EXPECT_?? with integer or enum arguments.  
It is here\n// just to avoid copy-and-paste of similar code.\n#define GTEST_IMPL_CMP_HELPER_(op_name, op)\\\nAssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \\\n                                   BiggestInt val1, BiggestInt val2) {\\\n  if (val1 op val2) {\\\n    return AssertionSuccess();\\\n  } else {\\\n    return AssertionFailure() \\\n        << \"Expected: (\" << expr1 << \") \" #op \" (\" << expr2\\\n        << \"), actual: \" << FormatForComparisonFailureMessage(val1, val2)\\\n        << \" vs \" << FormatForComparisonFailureMessage(val2, val1);\\\n  }\\\n}\n\n// Implements the helper function for {ASSERT|EXPECT}_NE with int or\n// enum arguments.\nGTEST_IMPL_CMP_HELPER_(NE, !=)\n// Implements the helper function for {ASSERT|EXPECT}_LE with int or\n// enum arguments.\nGTEST_IMPL_CMP_HELPER_(LE, <=)\n// Implements the helper function for {ASSERT|EXPECT}_LT with int or\n// enum arguments.\nGTEST_IMPL_CMP_HELPER_(LT, < )\n// Implements the helper function for {ASSERT|EXPECT}_GE with int or\n// enum arguments.\nGTEST_IMPL_CMP_HELPER_(GE, >=)\n// Implements the helper function for {ASSERT|EXPECT}_GT with int or\n// enum arguments.\nGTEST_IMPL_CMP_HELPER_(GT, > )\n\n#undef GTEST_IMPL_CMP_HELPER_\n\n// The helper function for {ASSERT|EXPECT}_STREQ.\nAssertionResult CmpHelperSTREQ(const char* expected_expression,\n                               const char* actual_expression,\n                               const char* expected,\n                               const char* actual) {\n  if (String::CStringEquals(expected, actual)) {\n    return AssertionSuccess();\n  }\n\n  return EqFailure(expected_expression,\n                   actual_expression,\n                   PrintToString(expected),\n                   PrintToString(actual),\n                   false);\n}\n\n// The helper function for {ASSERT|EXPECT}_STRCASEEQ.\nAssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,\n                                   const char* 
actual_expression,\n                                   const char* expected,\n                                   const char* actual) {\n  if (String::CaseInsensitiveCStringEquals(expected, actual)) {\n    return AssertionSuccess();\n  }\n\n  return EqFailure(expected_expression,\n                   actual_expression,\n                   PrintToString(expected),\n                   PrintToString(actual),\n                   true);\n}\n\n// The helper function for {ASSERT|EXPECT}_STRNE.\nAssertionResult CmpHelperSTRNE(const char* s1_expression,\n                               const char* s2_expression,\n                               const char* s1,\n                               const char* s2) {\n  if (!String::CStringEquals(s1, s2)) {\n    return AssertionSuccess();\n  } else {\n    return AssertionFailure() << \"Expected: (\" << s1_expression << \") != (\"\n                              << s2_expression << \"), actual: \\\"\"\n                              << s1 << \"\\\" vs \\\"\" << s2 << \"\\\"\";\n  }\n}\n\n// The helper function for {ASSERT|EXPECT}_STRCASENE.\nAssertionResult CmpHelperSTRCASENE(const char* s1_expression,\n                                   const char* s2_expression,\n                                   const char* s1,\n                                   const char* s2) {\n  if (!String::CaseInsensitiveCStringEquals(s1, s2)) {\n    return AssertionSuccess();\n  } else {\n    return AssertionFailure()\n        << \"Expected: (\" << s1_expression << \") != (\"\n        << s2_expression << \") (ignoring case), actual: \\\"\"\n        << s1 << \"\\\" vs \\\"\" << s2 << \"\\\"\";\n  }\n}\n\n}  // namespace internal\n\nnamespace {\n\n// Helper functions for implementing IsSubString() and IsNotSubstring().\n\n// This group of overloaded functions return true iff needle is a\n// substring of haystack.  
NULL is considered a substring of itself\n// only.\n\nbool IsSubstringPred(const char* needle, const char* haystack) {\n  if (needle == NULL || haystack == NULL)\n    return needle == haystack;\n\n  return strstr(haystack, needle) != NULL;\n}\n\nbool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {\n  if (needle == NULL || haystack == NULL)\n    return needle == haystack;\n\n  return wcsstr(haystack, needle) != NULL;\n}\n\n// StringType here can be either ::std::string or ::std::wstring.\ntemplate <typename StringType>\nbool IsSubstringPred(const StringType& needle,\n                     const StringType& haystack) {\n  return haystack.find(needle) != StringType::npos;\n}\n\n// This function implements either IsSubstring() or IsNotSubstring(),\n// depending on the value of the expected_to_be_substring parameter.\n// StringType here can be const char*, const wchar_t*, ::std::string,\n// or ::std::wstring.\ntemplate <typename StringType>\nAssertionResult IsSubstringImpl(\n    bool expected_to_be_substring,\n    const char* needle_expr, const char* haystack_expr,\n    const StringType& needle, const StringType& haystack) {\n  if (IsSubstringPred(needle, haystack) == expected_to_be_substring)\n    return AssertionSuccess();\n\n  const bool is_wide_string = sizeof(needle[0]) > 1;\n  const char* const begin_string_quote = is_wide_string ? \"L\\\"\" : \"\\\"\";\n  return AssertionFailure()\n      << \"Value of: \" << needle_expr << \"\\n\"\n      << \"  Actual: \" << begin_string_quote << needle << \"\\\"\\n\"\n      << \"Expected: \" << (expected_to_be_substring ? 
\"\" : \"not \")\n      << \"a substring of \" << haystack_expr << \"\\n\"\n      << \"Which is: \" << begin_string_quote << haystack << \"\\\"\";\n}\n\n}  // namespace\n\n// IsSubstring() and IsNotSubstring() check whether needle is a\n// substring of haystack (NULL is considered a substring of itself\n// only), and return an appropriate error message when they fail.\n\nAssertionResult IsSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const char* needle, const char* haystack) {\n  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);\n}\n\nAssertionResult IsSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const wchar_t* needle, const wchar_t* haystack) {\n  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);\n}\n\nAssertionResult IsNotSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const char* needle, const char* haystack) {\n  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);\n}\n\nAssertionResult IsNotSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const wchar_t* needle, const wchar_t* haystack) {\n  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);\n}\n\nAssertionResult IsSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const ::std::string& needle, const ::std::string& haystack) {\n  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);\n}\n\nAssertionResult IsNotSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const ::std::string& needle, const ::std::string& haystack) {\n  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);\n}\n\n#if GTEST_HAS_STD_WSTRING\nAssertionResult IsSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const ::std::wstring& needle, const ::std::wstring& haystack) {\n  return IsSubstringImpl(true, needle_expr, haystack_expr, 
needle, haystack);\n}\n\nAssertionResult IsNotSubstring(\n    const char* needle_expr, const char* haystack_expr,\n    const ::std::wstring& needle, const ::std::wstring& haystack) {\n  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);\n}\n#endif  // GTEST_HAS_STD_WSTRING\n\nnamespace internal {\n\n#if GTEST_OS_WINDOWS\n\nnamespace {\n\n// Helper function for IsHRESULT{SuccessFailure} predicates\nAssertionResult HRESULTFailureHelper(const char* expr,\n                                     const char* expected,\n                                     long hr) {  // NOLINT\n# if GTEST_OS_WINDOWS_MOBILE\n\n  // Windows CE doesn't support FormatMessage.\n  const char error_text[] = \"\";\n\n# else\n\n  // Looks up the human-readable system message for the HRESULT code\n  // and since we're not passing any params to FormatMessage, we don't\n  // want inserts expanded.\n  const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |\n                       FORMAT_MESSAGE_IGNORE_INSERTS;\n  const DWORD kBufSize = 4096;\n  // Gets the system's human readable message string for this HRESULT.\n  char error_text[kBufSize] = { '\\0' };\n  DWORD message_length = ::FormatMessageA(kFlags,\n                                          0,  // no source, we're asking system\n                                          hr,  // the error\n                                          0,  // no line width restrictions\n                                          error_text,  // output buffer\n                                          kBufSize,  // buf size\n                                          NULL);  // no arguments for inserts\n  // Trims tailing white space (FormatMessage leaves a trailing CR-LF)\n  for (; message_length && IsSpace(error_text[message_length - 1]);\n          --message_length) {\n    error_text[message_length - 1] = '\\0';\n  }\n\n# endif  // GTEST_OS_WINDOWS_MOBILE\n\n  const std::string error_hex(\"0x\" + String::FormatHexInt(hr));\n  return 
::testing::AssertionFailure()\n      << \"Expected: \" << expr << \" \" << expected << \".\\n\"\n      << \"  Actual: \" << error_hex << \" \" << error_text << \"\\n\";\n}\n\n}  // namespace\n\nAssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT\n  if (SUCCEEDED(hr)) {\n    return AssertionSuccess();\n  }\n  return HRESULTFailureHelper(expr, \"succeeds\", hr);\n}\n\nAssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT\n  if (FAILED(hr)) {\n    return AssertionSuccess();\n  }\n  return HRESULTFailureHelper(expr, \"fails\", hr);\n}\n\n#endif  // GTEST_OS_WINDOWS\n\n// Utility functions for encoding Unicode text (wide strings) in\n// UTF-8.\n\n// A Unicode code-point can have upto 21 bits, and is encoded in UTF-8\n// like this:\n//\n// Code-point length   Encoding\n//   0 -  7 bits       0xxxxxxx\n//   8 - 11 bits       110xxxxx 10xxxxxx\n//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx\n//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx\n\n// The maximum code-point a one-byte UTF-8 sequence can represent.\nconst UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) <<  7) - 1;\n\n// The maximum code-point a two-byte UTF-8 sequence can represent.\nconst UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;\n\n// The maximum code-point a three-byte UTF-8 sequence can represent.\nconst UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;\n\n// The maximum code-point a four-byte UTF-8 sequence can represent.\nconst UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;\n\n// Chops off the n lowest bits from a bit pattern.  Returns the n\n// lowest bits.  
As a side effect, the original bit pattern will be\n// shifted to the right by n bits.\ninline UInt32 ChopLowBits(UInt32* bits, int n) {\n  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);\n  *bits >>= n;\n  return low_bits;\n}\n\n// Converts a Unicode code point to a narrow string in UTF-8 encoding.\n// code_point parameter is of type UInt32 because wchar_t may not be\n// wide enough to contain a code point.\n// If the code_point is not a valid Unicode code point\n// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted\n// to \"(Invalid Unicode 0xXXXXXXXX)\".\nstd::string CodePointToUtf8(UInt32 code_point) {\n  if (code_point > kMaxCodePoint4) {\n    return \"(Invalid Unicode 0x\" + String::FormatHexInt(code_point) + \")\";\n  }\n\n  char str[5];  // Big enough for the largest valid code point.\n  if (code_point <= kMaxCodePoint1) {\n    str[1] = '\\0';\n    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx\n  } else if (code_point <= kMaxCodePoint2) {\n    str[2] = '\\0';\n    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx\n  } else if (code_point <= kMaxCodePoint3) {\n    str[3] = '\\0';\n    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx\n  } else {  // code_point <= kMaxCodePoint4\n    str[4] = '\\0';\n    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx\n    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx\n  }\n  return str;\n}\n\n// The following two functions only make sense if 
the system\n// uses UTF-16 for wide string encoding. All supported systems\n// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.\n\n// Determines if the arguments constitute UTF-16 surrogate pair\n// and thus should be combined into a single Unicode code point\n// using CreateCodePointFromUtf16SurrogatePair.\ninline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {\n  return sizeof(wchar_t) == 2 &&\n      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;\n}\n\n// Creates a Unicode code point from UTF16 surrogate pair.\ninline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,\n                                                    wchar_t second) {\n  const UInt32 mask = (1 << 10) - 1;\n  return (sizeof(wchar_t) == 2) ?\n      (((first & mask) << 10) | (second & mask)) + 0x10000 :\n      // This function should not be called when the condition is\n      // false, but we provide a sensible default in case it is.\n      static_cast<UInt32>(first);\n}\n\n// Converts a wide string to a narrow string in UTF-8 encoding.\n// The wide string is assumed to have the following encoding:\n//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)\n//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)\n// Parameter str points to a null-terminated wide string.\n// Parameter num_chars may additionally limit the number\n// of wchar_t characters processed. -1 is used when the entire string\n// should be processed.\n// If the string contains code points that are not valid Unicode code points\n// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output\n// as '(Invalid Unicode 0xXXXXXXXX)'. 
If the string is in UTF16 encoding\n// and contains invalid UTF-16 surrogate pairs, values in those pairs\n// will be encoded as individual Unicode characters from Basic Multilingual Plane.\nstd::string WideStringToUtf8(const wchar_t* str, int num_chars) {\n  if (num_chars == -1)\n    num_chars = static_cast<int>(wcslen(str));\n\n  ::std::stringstream stream;\n  for (int i = 0; i < num_chars; ++i) {\n    UInt32 unicode_code_point;\n\n    if (str[i] == L'\\0') {\n      break;\n    } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {\n      unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],\n                                                                 str[i + 1]);\n      i++;\n    } else {\n      unicode_code_point = static_cast<UInt32>(str[i]);\n    }\n\n    stream << CodePointToUtf8(unicode_code_point);\n  }\n  return StringStreamToString(&stream);\n}\n\n// Converts a wide C string to an std::string using the UTF-8 encoding.\n// NULL will be converted to \"(null)\".\nstd::string String::ShowWideCString(const wchar_t * wide_c_str) {\n  if (wide_c_str == NULL)  return \"(null)\";\n\n  return internal::WideStringToUtf8(wide_c_str, -1);\n}\n\n// Compares two wide C strings.  Returns true iff they have the same\n// content.\n//\n// Unlike wcscmp(), this function can handle NULL argument(s).  
A NULL\n// C string is considered different to any non-NULL C string,\n// including the empty string.\nbool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {\n  if (lhs == NULL) return rhs == NULL;\n\n  if (rhs == NULL) return false;\n\n  return wcscmp(lhs, rhs) == 0;\n}\n\n// Helper function for *_STREQ on wide strings.\nAssertionResult CmpHelperSTREQ(const char* expected_expression,\n                               const char* actual_expression,\n                               const wchar_t* expected,\n                               const wchar_t* actual) {\n  if (String::WideCStringEquals(expected, actual)) {\n    return AssertionSuccess();\n  }\n\n  return EqFailure(expected_expression,\n                   actual_expression,\n                   PrintToString(expected),\n                   PrintToString(actual),\n                   false);\n}\n\n// Helper function for *_STRNE on wide strings.\nAssertionResult CmpHelperSTRNE(const char* s1_expression,\n                               const char* s2_expression,\n                               const wchar_t* s1,\n                               const wchar_t* s2) {\n  if (!String::WideCStringEquals(s1, s2)) {\n    return AssertionSuccess();\n  }\n\n  return AssertionFailure() << \"Expected: (\" << s1_expression << \") != (\"\n                            << s2_expression << \"), actual: \"\n                            << PrintToString(s1)\n                            << \" vs \" << PrintToString(s2);\n}\n\n// Compares two C strings, ignoring case.  Returns true iff they have\n// the same content.\n//\n// Unlike strcasecmp(), this function can handle NULL argument(s).  
A\n// NULL C string is considered different to any non-NULL C string,\n// including the empty string.\nbool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {\n  if (lhs == NULL)\n    return rhs == NULL;\n  if (rhs == NULL)\n    return false;\n  return posix::StrCaseCmp(lhs, rhs) == 0;\n}\n\n  // Compares two wide C strings, ignoring case.  Returns true iff they\n  // have the same content.\n  //\n  // Unlike wcscasecmp(), this function can handle NULL argument(s).\n  // A NULL C string is considered different to any non-NULL wide C string,\n  // including the empty string.\n  // NB: The implementations on different platforms slightly differ.\n  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE\n  // environment variable. On GNU platform this method uses wcscasecmp\n  // which compares according to LC_CTYPE category of the current locale.\n  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the\n  // current locale.\nbool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,\n                                              const wchar_t* rhs) {\n  if (lhs == NULL) return rhs == NULL;\n\n  if (rhs == NULL) return false;\n\n#if GTEST_OS_WINDOWS\n  return _wcsicmp(lhs, rhs) == 0;\n#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID\n  return wcscasecmp(lhs, rhs) == 0;\n#else\n  // Android, Mac OS X and Cygwin don't define wcscasecmp.\n  // Other unknown OSes may not define it either.\n  wint_t left, right;\n  do {\n    left = towlower(*lhs++);\n    right = towlower(*rhs++);\n  } while (left && left == right);\n  return left == right;\n#endif  // OS selector\n}\n\n// Returns true iff str ends with the given suffix, ignoring case.\n// Any string is considered to end with an empty suffix.\nbool String::EndsWithCaseInsensitive(\n    const std::string& str, const std::string& suffix) {\n  const size_t str_len = str.length();\n  const size_t suffix_len = suffix.length();\n  return (str_len >= 
suffix_len) &&\n         CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,\n                                      suffix.c_str());\n}\n\n// Formats an int value as \"%02d\".\nstd::string String::FormatIntWidth2(int value) {\n  std::stringstream ss;\n  ss << std::setfill('0') << std::setw(2) << value;\n  return ss.str();\n}\n\n// Formats an int value as \"%X\".\nstd::string String::FormatHexInt(int value) {\n  std::stringstream ss;\n  ss << std::hex << std::uppercase << value;\n  return ss.str();\n}\n\n// Formats a byte as \"%02X\".\nstd::string String::FormatByte(unsigned char value) {\n  std::stringstream ss;\n  ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase\n     << static_cast<unsigned int>(value);\n  return ss.str();\n}\n\n// Converts the buffer in a stringstream to an std::string, converting NUL\n// bytes to \"\\\\0\" along the way.\nstd::string StringStreamToString(::std::stringstream* ss) {\n  const ::std::string& str = ss->str();\n  const char* const start = str.c_str();\n  const char* const end = start + str.length();\n\n  std::string result;\n  result.reserve(2 * (end - start));\n  for (const char* ch = start; ch != end; ++ch) {\n    if (*ch == '\\0') {\n      result += \"\\\\0\";  // Replaces NUL with \"\\\\0\";\n    } else {\n      result += *ch;\n    }\n  }\n\n  return result;\n}\n\n// Appends the user-supplied message to the Google-Test-generated message.\nstd::string AppendUserMessage(const std::string& gtest_msg,\n                              const Message& user_msg) {\n  // Appends the user message if it's non-empty.\n  const std::string user_msg_string = user_msg.GetString();\n  if (user_msg_string.empty()) {\n    return gtest_msg;\n  }\n\n  return gtest_msg + \"\\n\" + user_msg_string;\n}\n\n}  // namespace internal\n\n// class TestResult\n\n// Creates an empty TestResult.\nTestResult::TestResult()\n    : death_test_count_(0),\n      elapsed_time_(0) {\n}\n\n// D'tor.\nTestResult::~TestResult() {\n}\n\n// 
Returns the i-th test part result among all the results. i can\n// range from 0 to total_part_count() - 1. If i is not in that range,\n// aborts the program.\nconst TestPartResult& TestResult::GetTestPartResult(int i) const {\n  if (i < 0 || i >= total_part_count())\n    internal::posix::Abort();\n  return test_part_results_.at(i);\n}\n\n// Returns the i-th test property. i can range from 0 to\n// test_property_count() - 1. If i is not in that range, aborts the\n// program.\nconst TestProperty& TestResult::GetTestProperty(int i) const {\n  if (i < 0 || i >= test_property_count())\n    internal::posix::Abort();\n  return test_properties_.at(i);\n}\n\n// Clears the test part results.\nvoid TestResult::ClearTestPartResults() {\n  test_part_results_.clear();\n}\n\n// Adds a test part result to the list.\nvoid TestResult::AddTestPartResult(const TestPartResult& test_part_result) {\n  test_part_results_.push_back(test_part_result);\n}\n\n// Adds a test property to the list. If a property with the same key as the\n// supplied property is already represented, the value of this test_property\n// replaces the old value for that key.\nvoid TestResult::RecordProperty(const std::string& xml_element,\n                                const TestProperty& test_property) {\n  if (!ValidateTestProperty(xml_element, test_property)) {\n    return;\n  }\n  internal::MutexLock lock(&test_properites_mutex_);\n  const std::vector<TestProperty>::iterator property_with_matching_key =\n      std::find_if(test_properties_.begin(), test_properties_.end(),\n                   internal::TestPropertyKeyIs(test_property.key()));\n  if (property_with_matching_key == test_properties_.end()) {\n    test_properties_.push_back(test_property);\n    return;\n  }\n  property_with_matching_key->SetValue(test_property.value());\n}\n\n// The list of reserved attributes used in the <testsuites> element of XML\n// output.\nstatic const char* const kReservedTestSuitesAttributes[] = {\n  \"disabled\",\n  
\"errors\",\n  \"failures\",\n  \"name\",\n  \"random_seed\",\n  \"tests\",\n  \"time\",\n  \"timestamp\"\n};\n\n// The list of reserved attributes used in the <testsuite> element of XML\n// output.\nstatic const char* const kReservedTestSuiteAttributes[] = {\n  \"disabled\",\n  \"errors\",\n  \"failures\",\n  \"name\",\n  \"tests\",\n  \"time\"\n};\n\n// The list of reserved attributes used in the <testcase> element of XML output.\nstatic const char* const kReservedTestCaseAttributes[] = {\n  \"classname\",\n  \"name\",\n  \"status\",\n  \"time\",\n  \"type_param\",\n  \"value_param\"\n};\n\ntemplate <int kSize>\nstd::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {\n  return std::vector<std::string>(array, array + kSize);\n}\n\nstatic std::vector<std::string> GetReservedAttributesForElement(\n    const std::string& xml_element) {\n  if (xml_element == \"testsuites\") {\n    return ArrayAsVector(kReservedTestSuitesAttributes);\n  } else if (xml_element == \"testsuite\") {\n    return ArrayAsVector(kReservedTestSuiteAttributes);\n  } else if (xml_element == \"testcase\") {\n    return ArrayAsVector(kReservedTestCaseAttributes);\n  } else {\n    GTEST_CHECK_(false) << \"Unrecognized xml_element provided: \" << xml_element;\n  }\n  // This code is unreachable but some compilers may not realize that.\n  return std::vector<std::string>();\n}\n\nstatic std::string FormatWordList(const std::vector<std::string>& words) {\n  Message word_list;\n  for (size_t i = 0; i < words.size(); ++i) {\n    if (i > 0 && words.size() > 2) {\n      word_list << \", \";\n    }\n    if (i == words.size() - 1) {\n      word_list << \"and \";\n    }\n    word_list << \"'\" << words[i] << \"'\";\n  }\n  return word_list.GetString();\n}\n\nbool ValidateTestPropertyName(const std::string& property_name,\n                              const std::vector<std::string>& reserved_names) {\n  if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=\n         
 reserved_names.end()) {\n    ADD_FAILURE() << \"Reserved key used in RecordProperty(): \" << property_name\n                  << \" (\" << FormatWordList(reserved_names)\n                  << \" are reserved by \" << GTEST_NAME_ << \")\";\n    return false;\n  }\n  return true;\n}\n\n// Adds a failure if the key is a reserved attribute of the element named\n// xml_element.  Returns true if the property is valid.\nbool TestResult::ValidateTestProperty(const std::string& xml_element,\n                                      const TestProperty& test_property) {\n  return ValidateTestPropertyName(test_property.key(),\n                                  GetReservedAttributesForElement(xml_element));\n}\n\n// Clears the object.\nvoid TestResult::Clear() {\n  test_part_results_.clear();\n  test_properties_.clear();\n  death_test_count_ = 0;\n  elapsed_time_ = 0;\n}\n\n// Returns true iff the test failed.\nbool TestResult::Failed() const {\n  for (int i = 0; i < total_part_count(); ++i) {\n    if (GetTestPartResult(i).failed())\n      return true;\n  }\n  return false;\n}\n\n// Returns true iff the test part fatally failed.\nstatic bool TestPartFatallyFailed(const TestPartResult& result) {\n  return result.fatally_failed();\n}\n\n// Returns true iff the test fatally failed.\nbool TestResult::HasFatalFailure() const {\n  return CountIf(test_part_results_, TestPartFatallyFailed) > 0;\n}\n\n// Returns true iff the test part non-fatally failed.\nstatic bool TestPartNonfatallyFailed(const TestPartResult& result) {\n  return result.nonfatally_failed();\n}\n\n// Returns true iff the test has a non-fatal failure.\nbool TestResult::HasNonfatalFailure() const {\n  return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;\n}\n\n// Gets the number of all test parts.  
This is the sum of the number\n// of successful test parts and the number of failed test parts.\nint TestResult::total_part_count() const {\n  return static_cast<int>(test_part_results_.size());\n}\n\n// Returns the number of the test properties.\nint TestResult::test_property_count() const {\n  return static_cast<int>(test_properties_.size());\n}\n\n// class Test\n\n// Creates a Test object.\n\n// The c'tor saves the states of all flags.\nTest::Test()\n    : gtest_flag_saver_(new GTEST_FLAG_SAVER_) {\n}\n\n// The d'tor restores the states of all flags.  The actual work is\n// done by the d'tor of the gtest_flag_saver_ field, and thus not\n// visible here.\nTest::~Test() {\n}\n\n// Sets up the test fixture.\n//\n// A sub-class may override this.\nvoid Test::SetUp() {\n}\n\n// Tears down the test fixture.\n//\n// A sub-class may override this.\nvoid Test::TearDown() {\n}\n\n// Allows user supplied key value pairs to be recorded for later output.\nvoid Test::RecordProperty(const std::string& key, const std::string& value) {\n  UnitTest::GetInstance()->RecordProperty(key, value);\n}\n\n// Allows user supplied key value pairs to be recorded for later output.\nvoid Test::RecordProperty(const std::string& key, int value) {\n  Message value_message;\n  value_message << value;\n  RecordProperty(key, value_message.GetString().c_str());\n}\n\nnamespace internal {\n\nvoid ReportFailureInUnknownLocation(TestPartResult::Type result_type,\n                                    const std::string& message) {\n  // This function is a friend of UnitTest and as such has access to\n  // AddTestPartResult.\n  UnitTest::GetInstance()->AddTestPartResult(\n      result_type,\n      NULL,  // No info about the source file where the exception occurred.\n      -1,    // We have no info on which line caused the exception.\n      message,\n      \"\");   // No stack trace, either.\n}\n\n}  // namespace internal\n\n// Google Test requires all tests in the same test case to use the same test\n// 
fixture class.  This function checks if the current test has the\n// same fixture class as the first test in the current test case.  If\n// yes, it returns true; otherwise it generates a Google Test failure and\n// returns false.\nbool Test::HasSameFixtureClass() {\n  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();\n  const TestCase* const test_case = impl->current_test_case();\n\n  // Info about the first test in the current test case.\n  const TestInfo* const first_test_info = test_case->test_info_list()[0];\n  const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;\n  const char* const first_test_name = first_test_info->name();\n\n  // Info about the current test.\n  const TestInfo* const this_test_info = impl->current_test_info();\n  const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;\n  const char* const this_test_name = this_test_info->name();\n\n  if (this_fixture_id != first_fixture_id) {\n    // Is the first test defined using TEST?\n    const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();\n    // Is this test defined using TEST?\n    const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();\n\n    if (first_is_TEST || this_is_TEST) {\n      // Both TEST and TEST_F appear in same test case, which is incorrect.\n      // Tell the user how to fix this.\n\n      // Gets the name of the TEST and the name of the TEST_F.  Note\n      // that first_is_TEST and this_is_TEST cannot both be true, as\n      // the fixture IDs are different for the two tests.\n      const char* const TEST_name =\n          first_is_TEST ? first_test_name : this_test_name;\n      const char* const TEST_F_name =\n          first_is_TEST ? this_test_name : first_test_name;\n\n      ADD_FAILURE()\n          << \"All tests in the same test case must use the same test fixture\\n\"\n          << \"class, so mixing TEST_F and TEST in the same test case is\\n\"\n          << \"illegal.  
In test case \" << this_test_info->test_case_name()\n          << \",\\n\"\n          << \"test \" << TEST_F_name << \" is defined using TEST_F but\\n\"\n          << \"test \" << TEST_name << \" is defined using TEST.  You probably\\n\"\n          << \"want to change the TEST to TEST_F or move it to another test\\n\"\n          << \"case.\";\n    } else {\n      // Two fixture classes with the same name appear in two different\n      // namespaces, which is not allowed. Tell the user how to fix this.\n      ADD_FAILURE()\n          << \"All tests in the same test case must use the same test fixture\\n\"\n          << \"class.  However, in test case \"\n          << this_test_info->test_case_name() << \",\\n\"\n          << \"you defined test \" << first_test_name\n          << \" and test \" << this_test_name << \"\\n\"\n          << \"using two different test fixture classes.  This can happen if\\n\"\n          << \"the two classes are from different namespaces or translation\\n\"\n          << \"units and have the same name.  You should probably rename one\\n\"\n          << \"of the classes to put the tests into different test cases.\";\n    }\n    return false;\n  }\n\n  return true;\n}\n\n#if GTEST_HAS_SEH\n\n// Adds an \"exception thrown\" fatal failure to the current test.  
This\n// function returns its result via an output parameter pointer because VC++\n// prohibits creation of objects with destructors on stack in functions\n// using __try (see error C2712).\nstatic std::string* FormatSehExceptionMessage(DWORD exception_code,\n                                              const char* location) {\n  Message message;\n  message << \"SEH exception with code 0x\" << std::setbase(16) <<\n    exception_code << std::setbase(10) << \" thrown in \" << location << \".\";\n\n  return new std::string(message.GetString());\n}\n\n#endif  // GTEST_HAS_SEH\n\nnamespace internal {\n\n#if GTEST_HAS_EXCEPTIONS\n\n// Adds an \"exception thrown\" fatal failure to the current test.\nstatic std::string FormatCxxExceptionMessage(const char* description,\n                                             const char* location) {\n  Message message;\n  if (description != NULL) {\n    message << \"C++ exception with description \\\"\" << description << \"\\\"\";\n  } else {\n    message << \"Unknown C++ exception\";\n  }\n  message << \" thrown in \" << location << \".\";\n\n  return message.GetString();\n}\n\nstatic std::string PrintTestPartResultToString(\n    const TestPartResult& test_part_result);\n\nGoogleTestFailureException::GoogleTestFailureException(\n    const TestPartResult& failure)\n    : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}\n\n#endif  // GTEST_HAS_EXCEPTIONS\n\n// We put these helper functions in the internal namespace as IBM's xlC\n// compiler rejects the code if they were declared static.\n\n// Runs the given method and handles SEH exceptions it throws, when\n// SEH is supported; returns the 0-value for type Result in case of an\n// SEH exception.  (Microsoft compilers cannot handle SEH and C++\n// exceptions in the same function.  
Therefore, we provide a separate\n// wrapper function for handling SEH exceptions.)\ntemplate <class T, typename Result>\nResult HandleSehExceptionsInMethodIfSupported(\n    T* object, Result (T::*method)(), const char* location) {\n#if GTEST_HAS_SEH\n  __try {\n    return (object->*method)();\n  } __except (internal::UnitTestOptions::GTestShouldProcessSEH(  // NOLINT\n      GetExceptionCode())) {\n    // We create the exception message on the heap because VC++ prohibits\n    // creation of objects with destructors on stack in functions using __try\n    // (see error C2712).\n    std::string* exception_message = FormatSehExceptionMessage(\n        GetExceptionCode(), location);\n    internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,\n                                             *exception_message);\n    delete exception_message;\n    return static_cast<Result>(0);\n  }\n#else\n  (void)location;\n  return (object->*method)();\n#endif  // GTEST_HAS_SEH\n}\n\n// Runs the given method and catches and reports C++ and/or SEH-style\n// exceptions, if they are supported; returns the 0-value for type\n// Result in case of an SEH exception.\ntemplate <class T, typename Result>\nResult HandleExceptionsInMethodIfSupported(\n    T* object, Result (T::*method)(), const char* location) {\n  // NOTE: The user code can affect the way in which Google Test handles\n  // exceptions by setting GTEST_FLAG(catch_exceptions), but only before\n  // RUN_ALL_TESTS() starts. It is technically possible to check the flag\n  // after the exception is caught and either report or re-throw the\n  // exception based on the flag's value:\n  //\n  // try {\n  //   // Perform the test method.\n  // } catch (...) 
{\n  //   if (GTEST_FLAG(catch_exceptions))\n  //     // Report the exception as failure.\n  //   else\n  //     throw;  // Re-throws the original exception.\n  // }\n  //\n  // However, the purpose of this flag is to allow the program to drop into\n  // the debugger when the exception is thrown. On most platforms, once the\n  // control enters the catch block, the exception origin information is\n  // lost and the debugger will stop the program at the point of the\n  // re-throw in this function -- instead of at the point of the original\n  // throw statement in the code under test.  For this reason, we perform\n  // the check early, sacrificing the ability to affect Google Test's\n  // exception handling in the method where the exception is thrown.\n  if (internal::GetUnitTestImpl()->catch_exceptions()) {\n#if GTEST_HAS_EXCEPTIONS\n    try {\n      return HandleSehExceptionsInMethodIfSupported(object, method, location);\n    } catch (const internal::GoogleTestFailureException&) {  // NOLINT\n      // This exception type can only be thrown by a failed Google\n      // Test assertion with the intention of letting another testing\n      // framework catch it.  Therefore we just re-throw it.\n      throw;\n    } catch (const std::exception& e) {  // NOLINT\n      internal::ReportFailureInUnknownLocation(\n          TestPartResult::kFatalFailure,\n          FormatCxxExceptionMessage(e.what(), location));\n    } catch (...) 
{  // NOLINT\n      internal::ReportFailureInUnknownLocation(\n          TestPartResult::kFatalFailure,\n          FormatCxxExceptionMessage(NULL, location));\n    }\n    return static_cast<Result>(0);\n#else\n    return HandleSehExceptionsInMethodIfSupported(object, method, location);\n#endif  // GTEST_HAS_EXCEPTIONS\n  } else {\n    return (object->*method)();\n  }\n}\n\n}  // namespace internal\n\n// Runs the test and updates the test result.\nvoid Test::Run() {\n  if (!HasSameFixtureClass()) return;\n\n  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();\n  impl->os_stack_trace_getter()->UponLeavingGTest();\n  internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, \"SetUp()\");\n  // We will run the test only if SetUp() was successful.\n  if (!HasFatalFailure()) {\n    impl->os_stack_trace_getter()->UponLeavingGTest();\n    internal::HandleExceptionsInMethodIfSupported(\n        this, &Test::TestBody, \"the test body\");\n  }\n\n  // However, we want to clean up as much as possible.  Hence we will\n  // always call TearDown(), even if SetUp() or the test body has\n  // failed.\n  impl->os_stack_trace_getter()->UponLeavingGTest();\n  internal::HandleExceptionsInMethodIfSupported(\n      this, &Test::TearDown, \"TearDown()\");\n}\n\n// Returns true iff the current test has a fatal failure.\nbool Test::HasFatalFailure() {\n  return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();\n}\n\n// Returns true iff the current test has a non-fatal failure.\nbool Test::HasNonfatalFailure() {\n  return internal::GetUnitTestImpl()->current_test_result()->\n      HasNonfatalFailure();\n}\n\n// class TestInfo\n\n// Constructs a TestInfo object. 
It assumes ownership of the test factory\n// object.\nTestInfo::TestInfo(const std::string& a_test_case_name,\n                   const std::string& a_name,\n                   const char* a_type_param,\n                   const char* a_value_param,\n                   internal::CodeLocation a_code_location,\n                   internal::TypeId fixture_class_id,\n                   internal::TestFactoryBase* factory)\n    : test_case_name_(a_test_case_name),\n      name_(a_name),\n      type_param_(a_type_param ? new std::string(a_type_param) : NULL),\n      value_param_(a_value_param ? new std::string(a_value_param) : NULL),\n      location_(a_code_location),\n      fixture_class_id_(fixture_class_id),\n      should_run_(false),\n      is_disabled_(false),\n      matches_filter_(false),\n      factory_(factory),\n      result_() {}\n\n// Destructs a TestInfo object.\nTestInfo::~TestInfo() { delete factory_; }\n\nnamespace internal {\n\n// Creates a new TestInfo object and registers it with Google Test;\n// returns the created object.\n//\n// Arguments:\n//\n//   test_case_name:   name of the test case\n//   name:             name of the test\n//   type_param:       the name of the test's type parameter, or NULL if\n//                     this is not a typed or a type-parameterized test.\n//   value_param:      text representation of the test's value parameter,\n//                     or NULL if this is not a value-parameterized test.\n//   code_location:    code location where the test is defined\n//   fixture_class_id: ID of the test fixture class\n//   set_up_tc:        pointer to the function that sets up the test case\n//   tear_down_tc:     pointer to the function that tears down the test case\n//   factory:          pointer to the factory that creates a test object.\n//                     The newly created TestInfo instance will assume\n//                     ownership of the factory object.\nTestInfo* MakeAndRegisterTestInfo(\n    const char* 
test_case_name,\n    const char* name,\n    const char* type_param,\n    const char* value_param,\n    CodeLocation code_location,\n    TypeId fixture_class_id,\n    SetUpTestCaseFunc set_up_tc,\n    TearDownTestCaseFunc tear_down_tc,\n    TestFactoryBase* factory) {\n  TestInfo* const test_info =\n      new TestInfo(test_case_name, name, type_param, value_param,\n                   code_location, fixture_class_id, factory);\n  GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);\n  return test_info;\n}\n\n#if GTEST_HAS_PARAM_TEST\nvoid ReportInvalidTestCaseType(const char* test_case_name,\n                               CodeLocation code_location) {\n  Message errors;\n  errors\n      << \"Attempted redefinition of test case \" << test_case_name << \".\\n\"\n      << \"All tests in the same test case must use the same test fixture\\n\"\n      << \"class.  However, in test case \" << test_case_name << \", you tried\\n\"\n      << \"to define a test using a fixture class different from the one\\n\"\n      << \"used earlier. This can happen if the two fixture classes are\\n\"\n      << \"from different namespaces and have the same name. You should\\n\"\n      << \"probably rename one of the classes to put the tests into different\\n\"\n      << \"test cases.\";\n\n  fprintf(stderr, \"%s %s\",\n          FormatFileLocation(code_location.file.c_str(),\n                             code_location.line).c_str(),\n          errors.GetString().c_str());\n}\n#endif  // GTEST_HAS_PARAM_TEST\n\n}  // namespace internal\n\nnamespace {\n\n// A predicate that checks the test name of a TestInfo against a known\n// value.\n//\n// This is used for implementation of the TestCase class only.  
We put\n// it in the anonymous namespace to prevent polluting the outer\n// namespace.\n//\n// TestNameIs is copyable.\nclass TestNameIs {\n public:\n  // Constructor.\n  //\n  // TestNameIs has NO default constructor.\n  explicit TestNameIs(const char* name)\n      : name_(name) {}\n\n  // Returns true iff the test name of test_info matches name_.\n  bool operator()(const TestInfo * test_info) const {\n    return test_info && test_info->name() == name_;\n  }\n\n private:\n  std::string name_;\n};\n\n}  // namespace\n\nnamespace internal {\n\n// This method expands all parameterized tests registered with macros TEST_P\n// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.\n// This will be done just once during the program runtime.\nvoid UnitTestImpl::RegisterParameterizedTests() {\n#if GTEST_HAS_PARAM_TEST\n  if (!parameterized_tests_registered_) {\n    parameterized_test_registry_.RegisterTests();\n    parameterized_tests_registered_ = true;\n  }\n#endif\n}\n\n}  // namespace internal\n\n// Creates the test object, runs it, records its result, and then\n// deletes it.\nvoid TestInfo::Run() {\n  if (!should_run_) return;\n\n  // Tells UnitTest where to store test result.\n  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();\n  impl->set_current_test_info(this);\n\n  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();\n\n  // Notifies the unit test event listeners that a test is about to start.\n  repeater->OnTestStart(*this);\n\n  const TimeInMillis start = internal::GetTimeInMillis();\n\n  impl->os_stack_trace_getter()->UponLeavingGTest();\n\n  // Creates the test object.\n  Test* const test = internal::HandleExceptionsInMethodIfSupported(\n      factory_, &internal::TestFactoryBase::CreateTest,\n      \"the test fixture's constructor\");\n\n  // Runs the test only if the test object was created and its\n  // constructor didn't generate a fatal failure.\n  if ((test != NULL) && !Test::HasFatalFailure()) 
{
    // This doesn't throw as all user code that can throw are wrapped into
    // exception handling code.
    test->Run();
  }

  // Deletes the test object.
  impl->os_stack_trace_getter()->UponLeavingGTest();
  internal::HandleExceptionsInMethodIfSupported(
      test, &Test::DeleteSelf_, "the test fixture's destructor");

  result_.set_elapsed_time(internal::GetTimeInMillis() - start);

  // Notifies the unit test event listener that a test has just finished.
  repeater->OnTestEnd(*this);

  // Tells UnitTest to stop associating assertion results to this
  // test.
  impl->set_current_test_info(NULL);
}

// class TestCase

// Gets the number of successful tests in this test case.
int TestCase::successful_test_count() const {
  return CountIf(test_info_list_, TestPassed);
}

// Gets the number of failed tests in this test case.
int TestCase::failed_test_count() const {
  return CountIf(test_info_list_, TestFailed);
}

// Gets the number of disabled tests that will be reported in the XML report.
int TestCase::reportable_disabled_test_count() const {
  return CountIf(test_info_list_, TestReportableDisabled);
}

// Gets the number of disabled tests in this test case.
int TestCase::disabled_test_count() const {
  return CountIf(test_info_list_, TestDisabled);
}

// Gets the number of tests to be printed in the XML report.
int TestCase::reportable_test_count() const {
  return CountIf(test_info_list_, TestReportable);
}

// Get the number of tests in this test case that should run.
int TestCase::test_to_run_count() const {
  return CountIf(test_info_list_, ShouldRunTest);
}

// Gets the number of all tests.
int TestCase::total_test_count() const {
  return static_cast<int>(test_info_list_.size());
}

// Creates a TestCase with the given name.
//
// Arguments:
//
//   name:         name of the test case
//   a_type_param: the name of the test case's type parameter, or NULL if
//                 this 
// is not a typed or a type-parameterized test case.
//   set_up_tc:    pointer to the function that sets up the test case
//   tear_down_tc: pointer to the function that tears down the test case
TestCase::TestCase(const char* a_name, const char* a_type_param,
                   Test::SetUpTestCaseFunc set_up_tc,
                   Test::TearDownTestCaseFunc tear_down_tc)
    : name_(a_name),
      // type_param_ is heap-allocated only when a type parameter exists;
      // a NULL pointer means "not a typed/type-parameterized test case".
      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
      set_up_tc_(set_up_tc),
      tear_down_tc_(tear_down_tc),
      should_run_(false),
      elapsed_time_(0) {
}

// Destructor of TestCase.
TestCase::~TestCase() {
  // Deletes every Test in the collection.
  ForEach(test_info_list_, internal::Delete<TestInfo>);
}

// Returns the i-th test among all the tests. i can range from 0 to
// total_test_count() - 1. If i is not in that range, returns NULL.
// The indirection through test_indices_ reflects any shuffling applied.
const TestInfo* TestCase::GetTestInfo(int i) const {
  const int index = GetElementOr(test_indices_, i, -1);
  return index < 0 ? NULL : test_info_list_[index];
}

// Returns the i-th test among all the tests. i can range from 0 to
// total_test_count() - 1. If i is not in that range, returns NULL.
TestInfo* TestCase::GetMutableTestInfo(int i) {
  const int index = GetElementOr(test_indices_, i, -1);
  return index < 0 ? NULL : test_info_list_[index];
}

// Adds a test to this test case.
// Will delete the test upon
// destruction of the TestCase object.
void TestCase::AddTestInfo(TestInfo * test_info) {
  test_info_list_.push_back(test_info);
  // test_indices_ starts out as the identity permutation; shuffling
  // later permutes it without touching test_info_list_.
  test_indices_.push_back(static_cast<int>(test_indices_.size()));
}

// Runs every test in this TestCase.
void TestCase::Run() {
  if (!should_run_) return;

  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
  impl->set_current_test_case(this);

  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();

  repeater->OnTestCaseStart(*this);
  impl->os_stack_trace_getter()->UponLeavingGTest();
  internal::HandleExceptionsInMethodIfSupported(
      this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");

  // Only the time spent in the tests themselves is counted, not the
  // SetUpTestCase/TearDownTestCase bookkeeping around it.
  const internal::TimeInMillis start = internal::GetTimeInMillis();
  for (int i = 0; i < total_test_count(); i++) {
    GetMutableTestInfo(i)->Run();
  }
  elapsed_time_ = internal::GetTimeInMillis() - start;

  impl->os_stack_trace_getter()->UponLeavingGTest();
  internal::HandleExceptionsInMethodIfSupported(
      this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");

  repeater->OnTestCaseEnd(*this);
  impl->set_current_test_case(NULL);
}

// Clears the results of all tests in this test case.
void TestCase::ClearResult() {
  ad_hoc_test_result_.Clear();
  ForEach(test_info_list_, TestInfo::ClearTestResult);
}

// Shuffles the tests in this test case.
void TestCase::ShuffleTests(internal::Random* random) {
  Shuffle(random, &test_indices_);
}

// Restores the test order to before the first shuffle.
void TestCase::UnshuffleTests() {
  for (size_t i = 0; i < test_indices_.size(); i++) {
    test_indices_[i] = static_cast<int>(i);
  }
}

// Formats a countable noun.  Depending on its quantity, either the
// singular form or the plural form is used.
// e.g.
//
// FormatCountableNoun(1, "formula", "formuli") returns "1 formula".
// FormatCountableNoun(5, "book", "books") returns "5 books".
static std::string FormatCountableNoun(int count,
                                       const char * singular_form,
                                       const char * plural_form) {
  return internal::StreamableToString(count) + " " +
      (count == 1 ? singular_form : plural_form);
}

// Formats the count of tests.
static std::string FormatTestCount(int test_count) {
  return FormatCountableNoun(test_count, "test", "tests");
}

// Formats the count of test cases.
static std::string FormatTestCaseCount(int test_case_count) {
  return FormatCountableNoun(test_case_count, "test case", "test cases");
}

// Converts a TestPartResult::Type enum to human-friendly string
// representation.  Both kNonFatalFailure and kFatalFailure are translated
// to "Failure", as the user usually doesn't care about the difference
// between the two when viewing the test result.
static const char * TestPartResultTypeToString(TestPartResult::Type type) {
  switch (type) {
    case TestPartResult::kSuccess:
      return "Success";

    case TestPartResult::kNonFatalFailure:
    case TestPartResult::kFatalFailure:
#ifdef _MSC_VER
      // The MSVC form ("error: " with no newline) matches the compiler's
      // diagnostic format so the IDE can parse it.
      return "error: ";
#else
      return "Failure\n";
#endif
    default:
      return "Unknown result type";
  }
}

namespace internal {

// Prints a TestPartResult to an std::string.
static std::string PrintTestPartResultToString(
    const TestPartResult& test_part_result) {
  return (Message()
          << internal::FormatFileLocation(test_part_result.file_name(),
                                          test_part_result.line_number())
          << " " << TestPartResultTypeToString(test_part_result.type())
          << test_part_result.message()).GetString();
}

// Prints a TestPartResult.
static void PrintTestPartResult(const 
TestPartResult& test_part_result) {
  const std::string& result =
      PrintTestPartResultToString(test_part_result);
  printf("%s\n", result.c_str());
  fflush(stdout);
  // If the test program runs in Visual Studio or a debugger, the
  // following statements add the test part result message to the Output
  // window such that the user can double-click on it to jump to the
  // corresponding source code location; otherwise they do nothing.
#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
  // We don't call OutputDebugString*() on Windows Mobile, as printing
  // to stdout is done by OutputDebugString() there already - we don't
  // want the same message printed twice.
  ::OutputDebugStringA(result.c_str());
  ::OutputDebugStringA("\n");
#endif
}

// class PrettyUnitTestResultPrinter

// Color choices for terminal output; COLOR_DEFAULT means "leave the
// terminal's current color alone".
enum GTestColor {
  COLOR_DEFAULT,
  COLOR_RED,
  COLOR_GREEN,
  COLOR_YELLOW
};

#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT

// Returns the character attribute for the given color.
WORD GetColorAttribute(GTestColor color) {
  switch (color) {
    case COLOR_RED:    return FOREGROUND_RED;
    case COLOR_GREEN:  return FOREGROUND_GREEN;
    case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
    default:           return 0;
  }
}

#else

// Returns the ANSI color code for the given color.
COLOR_DEFAULT is\n// an invalid input.\nconst char* GetAnsiColorCode(GTestColor color) {\n  switch (color) {\n    case COLOR_RED:     return \"1\";\n    case COLOR_GREEN:   return \"2\";\n    case COLOR_YELLOW:  return \"3\";\n    default:            return NULL;\n  };\n}\n\n#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE\n\n// Returns true iff Google Test should use colors in the output.\nbool ShouldUseColor(bool stdout_is_tty) {\n  const char* const gtest_color = GTEST_FLAG(color).c_str();\n\n  if (String::CaseInsensitiveCStringEquals(gtest_color, \"auto\")) {\n#if GTEST_OS_WINDOWS\n    // On Windows the TERM variable is usually not set, but the\n    // console there does support colors.\n    return stdout_is_tty;\n#else\n    // On non-Windows platforms, we rely on the TERM variable.\n    const char* const term = posix::GetEnv(\"TERM\");\n    const bool term_supports_color =\n        String::CStringEquals(term, \"xterm\") ||\n        String::CStringEquals(term, \"xterm-color\") ||\n        String::CStringEquals(term, \"xterm-256color\") ||\n        String::CStringEquals(term, \"screen\") ||\n        String::CStringEquals(term, \"screen-256color\") ||\n        String::CStringEquals(term, \"rxvt-unicode\") ||\n        String::CStringEquals(term, \"rxvt-unicode-256color\") ||\n        String::CStringEquals(term, \"linux\") ||\n        String::CStringEquals(term, \"cygwin\");\n    return stdout_is_tty && term_supports_color;\n#endif  // GTEST_OS_WINDOWS\n  }\n\n  return String::CaseInsensitiveCStringEquals(gtest_color, \"yes\") ||\n      String::CaseInsensitiveCStringEquals(gtest_color, \"true\") ||\n      String::CaseInsensitiveCStringEquals(gtest_color, \"t\") ||\n      String::CStringEquals(gtest_color, \"1\");\n  // We take \"yes\", \"true\", \"t\", and \"1\" as meaning \"yes\".  If the\n  // value is neither one of these nor \"auto\", we treat it as \"no\" to\n  // be conservative.\n}\n\n// Helpers for printing colored strings to stdout. 
// Note that on Windows, we
// cannot simply emit special characters and have the terminal change colors.
// This routine must actually emit the characters rather than return a string
// that would be colored when printed, as can be done on Linux.
void ColoredPrintf(GTestColor color, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);

#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || \
    GTEST_OS_IOS || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
  const bool use_color = AlwaysFalse();
#else
  // The TTY check is done once per process; a static local caches it.
  static const bool in_color_mode =
      ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
  const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
#endif  // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
  // The '!= 0' comparison is necessary to satisfy MSVC 7.1.

  if (!use_color) {
    vprintf(fmt, args);
    va_end(args);
    return;
  }

#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
  const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);

  // Gets the current text color.
  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
  GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
  const WORD old_color_attrs = buffer_info.wAttributes;

  // We need to flush the stream buffers into the console before each
  // SetConsoleTextAttribute call lest it affect the text that is already
  // printed but has not yet reached the console.
  fflush(stdout);
  SetConsoleTextAttribute(stdout_handle,
                          GetColorAttribute(color) | FOREGROUND_INTENSITY);
  vprintf(fmt, args);

  fflush(stdout);
  // Restores the text color.
  SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
  printf("\033[0;3%sm", GetAnsiColorCode(color));
  vprintf(fmt, args);
  printf("\033[m");  // Resets the terminal to default.
#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
  va_end(args);
}

// Text printed in Google Test's text output and --gunit_list_tests
// output to label the type parameter and value parameter for a test.
static const char kTypeParamLabel[] = "TypeParam";
static const char kValueParamLabel[] = "GetParam()";

// Prints ", where TypeParam = ... and GetParam() = ..." for a
// typed/parameterized test; prints nothing for an ordinary test.
void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
  const char* const type_param = test_info.type_param();
  const char* const value_param = test_info.value_param();

  if (type_param != NULL || value_param != NULL) {
    printf(", where ");
    if (type_param != NULL) {
      printf("%s = %s", kTypeParamLabel, type_param);
      if (value_param != NULL)
        printf(" and ");
    }
    if (value_param != NULL) {
      printf("%s = %s", kValueParamLabel, value_param);
    }
  }
}

// This class implements the TestEventListener interface.
//
// Class PrettyUnitTestResultPrinter is copyable.
class PrettyUnitTestResultPrinter : public TestEventListener {
 public:
  PrettyUnitTestResultPrinter() {}
  static void PrintTestName(const char * test_case, const char * test) {
    printf("%s.%s", test_case, test);
  }

  // The following methods override what's in the TestEventListener class.
  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
  virtual void OnTestCaseStart(const TestCase& test_case);
  virtual void OnTestStart(const TestInfo& test_info);
  virtual void OnTestPartResult(const TestPartResult& result);
  virtual void OnTestEnd(const TestInfo& test_info);
  virtual void OnTestCaseEnd(const TestCase& test_case);
  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
  virtual void OnTestIterationEnd(const 
UnitTest& unit_test, int iteration);
  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}

 private:
  static void PrintFailedTests(const UnitTest& unit_test);
};

// Fired before each iteration of tests starts.
void PrettyUnitTestResultPrinter::OnTestIterationStart(
    const UnitTest& unit_test, int iteration) {
  if (GTEST_FLAG(repeat) != 1)
    printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);

  const char* const filter = GTEST_FLAG(filter).c_str();

  // Prints the filter if it's not *.  This reminds the user that some
  // tests may be skipped.
  if (!String::CStringEquals(filter, kUniversalFilter)) {
    ColoredPrintf(COLOR_YELLOW,
                  "Note: %s filter = %s\n", GTEST_NAME_, filter);
  }

  if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
    const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
    ColoredPrintf(COLOR_YELLOW,
                  "Note: This is test shard %d of %s.\n",
                  static_cast<int>(shard_index) + 1,
                  internal::posix::GetEnv(kTestTotalShards));
  }

  if (GTEST_FLAG(shuffle)) {
    ColoredPrintf(COLOR_YELLOW,
                  "Note: Randomizing tests' orders with a seed of %d .\n",
                  unit_test.random_seed());
  }

  ColoredPrintf(COLOR_GREEN,  "[==========] ");
  printf("Running %s from %s.\n",
         FormatTestCount(unit_test.test_to_run_count()).c_str(),
         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
  fflush(stdout);
}

void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
    const UnitTest& /*unit_test*/) {
  ColoredPrintf(COLOR_GREEN,  "[----------] ");
  printf("Global test environment set-up.\n");
  fflush(stdout);
}

void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
  const std::string counts =
      FormatCountableNoun(test_case.test_to_run_count(), 
\"test\", \"tests\");\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"%s from %s\", counts.c_str(), test_case.name());\n  if (test_case.type_param() == NULL) {\n    printf(\"\\n\");\n  } else {\n    printf(\", where %s = %s\\n\", kTypeParamLabel, test_case.type_param());\n  }\n  fflush(stdout);\n}\n\nvoid PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {\n  ColoredPrintf(COLOR_GREEN,  \"[ RUN      ] \");\n  PrintTestName(test_info.test_case_name(), test_info.name());\n  printf(\"\\n\");\n  fflush(stdout);\n}\n\n// Called after an assertion failure.\nvoid PrettyUnitTestResultPrinter::OnTestPartResult(\n    const TestPartResult& result) {\n  // If the test part succeeded, we don't need to do anything.\n  if (result.type() == TestPartResult::kSuccess)\n    return;\n\n  // Print failure message from the assertion (e.g. expected this and got that).\n  PrintTestPartResult(result);\n  fflush(stdout);\n}\n\nvoid PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {\n  if (test_info.result()->Passed()) {\n    ColoredPrintf(COLOR_GREEN, \"[       OK ] \");\n  } else {\n    ColoredPrintf(COLOR_RED, \"[  FAILED  ] \");\n  }\n  PrintTestName(test_info.test_case_name(), test_info.name());\n  if (test_info.result()->Failed())\n    PrintFullTestCommentIfPresent(test_info);\n\n  if (GTEST_FLAG(print_time)) {\n    printf(\" (%s ms)\\n\", internal::StreamableToString(\n           test_info.result()->elapsed_time()).c_str());\n  } else {\n    printf(\"\\n\");\n  }\n  fflush(stdout);\n}\n\nvoid PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {\n  if (!GTEST_FLAG(print_time)) return;\n\n  const std::string counts =\n      FormatCountableNoun(test_case.test_to_run_count(), \"test\", \"tests\");\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"%s from %s (%s ms total)\\n\\n\",\n         counts.c_str(), test_case.name(),\n         internal::StreamableToString(test_case.elapsed_time()).c_str());\n  
  fflush(stdout);
}

void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
    const UnitTest& /*unit_test*/) {
  ColoredPrintf(COLOR_GREEN,  "[----------] ");
  printf("Global test environment tear-down\n");
  fflush(stdout);
}

// Internal helper for printing the list of failed tests.
void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
  const int failed_test_count = unit_test.failed_test_count();
  if (failed_test_count == 0) {
    return;
  }

  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
    const TestCase& test_case = *unit_test.GetTestCase(i);
    // Skip test cases that ran nothing or had no failures.
    if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
      continue;
    }
    for (int j = 0; j < test_case.total_test_count(); ++j) {
      const TestInfo& test_info = *test_case.GetTestInfo(j);
      if (!test_info.should_run() || test_info.result()->Passed()) {
        continue;
      }
      ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
      printf("%s.%s", test_case.name(), test_info.name());
      PrintFullTestCommentIfPresent(test_info);
      printf("\n");
    }
  }
}

void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
                                                     int /*iteration*/) {
  ColoredPrintf(COLOR_GREEN,  "[==========] ");
  printf("%s from %s ran.",
         FormatTestCount(unit_test.test_to_run_count()).c_str(),
         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
  if (GTEST_FLAG(print_time)) {
    printf(" (%s ms total)",
           internal::StreamableToString(unit_test.elapsed_time()).c_str());
  }
  printf("\n");
  ColoredPrintf(COLOR_GREEN,  "[  PASSED  ] ");
  printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());

  int num_failures = unit_test.failed_test_count();
  if (!unit_test.Passed()) {
    const int failed_test_count = unit_test.failed_test_count();
    ColoredPrintf(COLOR_RED,  "[  FAILED  ] ");
    printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
    PrintFailedTests(unit_test);
    printf("\n%2d FAILED %s\n", num_failures,
                        num_failures == 1 ? "TEST" : "TESTS");
  }

  int num_disabled = unit_test.reportable_disabled_test_count();
  if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
    if (!num_failures) {
      printf("\n");  // Add a spacer if no FAILURE banner is displayed.
    }
    ColoredPrintf(COLOR_YELLOW,
                  "  YOU HAVE %d DISABLED %s\n\n",
                  num_disabled,
                  num_disabled == 1 ? "TEST" : "TESTS");
  }
  // Ensure that Google Test output is printed before, e.g., heapchecker output.
  fflush(stdout);
}

// End PrettyUnitTestResultPrinter

// class TestEventRepeater
//
// This class forwards events to other event listeners.
class TestEventRepeater : public TestEventListener {
 public:
  TestEventRepeater() : forwarding_enabled_(true) {}
  virtual ~TestEventRepeater();
  // Append takes ownership of the listener (see the destructor below,
  // which deletes every registered listener); Release gives it back.
  void Append(TestEventListener *listener);
  TestEventListener* Release(TestEventListener* listener);

  // Controls whether events will be forwarded to listeners_.
  // Set to false
  // in death test child processes.
  bool forwarding_enabled() const { return forwarding_enabled_; }
  void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }

  // TestEventListener overrides; each forwards to every registered
  // listener (End events are forwarded in reverse registration order —
  // see the GTEST_REVERSE_REPEATER_METHOD_ definitions).
  virtual void OnTestProgramStart(const UnitTest& unit_test);
  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
  virtual void OnTestCaseStart(const TestCase& test_case);
  virtual void OnTestStart(const TestInfo& test_info);
  virtual void OnTestPartResult(const TestPartResult& result);
  virtual void OnTestEnd(const TestInfo& test_info);
  virtual void OnTestCaseEnd(const TestCase& test_case);
  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
  virtual void OnTestProgramEnd(const UnitTest& unit_test);

 private:
  // Controls whether events will be forwarded to listeners_.
  // Set to false
  // in death test child processes.
  bool forwarding_enabled_;
  // The list of listeners that receive events.
  std::vector<TestEventListener*> listeners_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
};

TestEventRepeater::~TestEventRepeater() {
  // The repeater owns its listeners and deletes them on destruction.
  ForEach(listeners_, Delete<TestEventListener>);
}

void TestEventRepeater::Append(TestEventListener *listener) {
  listeners_.push_back(listener);
}

// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
  for (size_t i = 0; i < listeners_.size(); ++i) {
    if (listeners_[i] == listener) {
      listeners_.erase(listeners_.begin() + i);
      return listener;
    }
  }

  // NULL signals that the listener was not registered here.
  return NULL;
}

// Since most methods are very similar, use macros to reduce boilerplate.
// This defines a member that forwards the call to all listeners.
#define GTEST_REPEATER_METHOD_(Name, Type) \
void TestEventRepeater::Name(const Type& parameter) { \
  if (forwarding_enabled_) { \
    for (size_t i = 0; i < listeners_.size(); i++) { \
      listeners_[i]->Name(parameter); \
    } \
  } \
}
// This defines a member that forwards the call to all listeners in reverse
// order.
#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
void TestEventRepeater::Name(const Type& parameter) { \
  if (forwarding_enabled_) { \
    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
      listeners_[i]->Name(parameter); \
    } \
  } \
}

GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, 
UnitTest)
GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)

#undef GTEST_REPEATER_METHOD_
#undef GTEST_REVERSE_REPEATER_METHOD_

// The two iteration events take an extra int argument, so they cannot
// use the macros above and are written out by hand.
void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
                                             int iteration) {
  if (forwarding_enabled_) {
    for (size_t i = 0; i < listeners_.size(); i++) {
      listeners_[i]->OnTestIterationStart(unit_test, iteration);
    }
  }
}

void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
                                           int iteration) {
  if (forwarding_enabled_) {
    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
      listeners_[i]->OnTestIterationEnd(unit_test, iteration);
    }
  }
}

// End TestEventRepeater

// This class generates an XML output file.
class XmlUnitTestResultPrinter : public EmptyTestEventListener {
 public:
  explicit XmlUnitTestResultPrinter(const char* output_file);

  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);

 private:
  // Is c a whitespace character that is normalized to a space character
  // when it appears in an XML attribute value?
  static bool IsNormalizableWhitespace(char c) {
    return c == 0x9 || c == 0xA || c == 0xD;
  }

  // May c appear in a well-formed XML document?
  // NOTE(review): with a signed `char`, bytes >= 0x80 compare negative and
  // are rejected here, which strips non-ASCII (e.g. UTF-8) bytes — confirm
  // whether that is intended before relying on it.
  static bool IsValidXmlCharacter(char c) {
    return IsNormalizableWhitespace(c) || c >= 0x20;
  }

  // Returns an XML-escaped copy of the input string str.
// If
  // is_attribute is true, the text is meant to appear as an attribute
  // value, and normalizable whitespace is preserved by replacing it
  // with character references.
  static std::string EscapeXml(const std::string& str, bool is_attribute);

  // Returns the given string with all characters invalid in XML removed.
  static std::string RemoveInvalidXmlCharacters(const std::string& str);

  // Convenience wrapper around EscapeXml when str is an attribute value.
  static std::string EscapeXmlAttribute(const std::string& str) {
    return EscapeXml(str, true);
  }

  // Convenience wrapper around EscapeXml when str is not an attribute value.
  static std::string EscapeXmlText(const char* str) {
    return EscapeXml(str, false);
  }

  // Verifies that the given attribute belongs to the given element and
  // streams the attribute as XML.
  static void OutputXmlAttribute(std::ostream* stream,
                                 const std::string& element_name,
                                 const std::string& name,
                                 const std::string& value);

  // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
  static void OutputXmlCDataSection(::std::ostream* stream, const char* data);

  // Streams an XML representation of a TestInfo object.
  static void OutputXmlTestInfo(::std::ostream* stream,
                                const char* test_case_name,
                                const TestInfo& test_info);

  // Prints an XML representation of a TestCase object.
  static void PrintXmlTestCase(::std::ostream* stream,
                               const TestCase& test_case);

  // Prints an XML summary of unit_test to output stream out.
  static void PrintXmlUnitTest(::std::ostream* stream,
                               const UnitTest& unit_test);

  // Produces a string representing the test properties in a result as space
  // delimited XML attributes based on the 
property key=\"value\" pairs.\n  // When the std::string is not empty, it includes a space at the beginning,\n  // to delimit this attribute from prior attributes.\n  static std::string TestPropertiesAsXmlAttributes(const TestResult& result);\n\n  // The output file.\n  const std::string output_file_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);\n};\n\n// Creates a new XmlUnitTestResultPrinter.\nXmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)\n    : output_file_(output_file) {\n  if (output_file_.c_str() == NULL || output_file_.empty()) {\n    fprintf(stderr, \"XML output file may not be null\\n\");\n    fflush(stderr);\n    exit(EXIT_FAILURE);\n  }\n}\n\n// Called after the unit test ends.\nvoid XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,\n                                                  int /*iteration*/) {\n  FILE* xmlout = NULL;\n  FilePath output_file(output_file_);\n  FilePath output_dir(output_file.RemoveFileName());\n\n  if (output_dir.CreateDirectoriesRecursively()) {\n    xmlout = posix::FOpen(output_file_.c_str(), \"w\");\n  }\n  if (xmlout == NULL) {\n    // TODO(wan): report the reason of the failure.\n    //\n    // We don't do it for now as:\n    //\n    //   1. There is no urgent need for it.\n    //   2. It's a bit involved to make the errno variable thread-safe on\n    //      all three operating systems (Linux, Windows, and Mac OS).\n    //   3. To interpret the meaning of errno in a thread-safe way,\n    //      we need the strerror_r() function, which is not available on\n    //      Windows.\n    fprintf(stderr,\n            \"Unable to open file \\\"%s\\\"\\n\",\n            output_file_.c_str());\n    fflush(stderr);\n    exit(EXIT_FAILURE);\n  }\n  std::stringstream stream;\n  PrintXmlUnitTest(&stream, unit_test);\n  fprintf(xmlout, \"%s\", StringStreamToString(&stream).c_str());\n  fclose(xmlout);\n}\n\n// Returns an XML-escaped copy of the input string str.  
// If is_attribute
// is true, the text is meant to appear as an attribute value, and
// normalizable whitespace is preserved by replacing it with character
// references.
//
// Invalid XML characters in str, if any, are stripped from the output.
// It is expected that most, if not all, of the text processed by this
// module will consist of ordinary English text.
// If this module is ever modified to produce version 1.1 XML output,
// most invalid characters can be retained using character references.
// TODO(wan): It might be nice to have a minimally invasive, human-readable
// escaping scheme for invalid characters, rather than dropping them.
std::string XmlUnitTestResultPrinter::EscapeXml(
    const std::string& str, bool is_attribute) {
  Message m;

  for (size_t i = 0; i < str.size(); ++i) {
    const char ch = str[i];
    switch (ch) {
      case '<':
        m << "&lt;";
        break;
      case '>':
        m << "&gt;";
        break;
      case '&':
        m << "&amp;";
        break;
      case '\'':
        // Quotes only need escaping inside attribute values.
        if (is_attribute)
          m << "&apos;";
        else
          m << '\'';
        break;
      case '"':
        if (is_attribute)
          m << "&quot;";
        else
          m << '"';
        break;
      default:
        if (IsValidXmlCharacter(ch)) {
          // Tab/CR/LF in attribute values become numeric character
          // references so parsers do not normalize them to spaces.
          if (is_attribute && IsNormalizableWhitespace(ch))
            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
              << ";";
          else
            m << ch;
        }
        break;
    }
  }

  return m.GetString();
}

// Returns the given string with all characters invalid in XML removed.
// Currently invalid characters are dropped from the string. An
// alternative is to replace them with certain characters such as .
or ?.
std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
    const std::string& str) {
  std::string output;
  output.reserve(str.size());
  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
    if (IsValidXmlCharacter(*it))
      output.push_back(*it);

  return output;
}

// The following routines generate an XML representation of a UnitTest
// object.
//
// This is how Google Test concepts map to the DTD:
//
// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
//     <testcase name="test-name">     <-- corresponds to a TestInfo object
//       <failure message="...">...</failure>
//       <failure message="...">...</failure>
//       <failure message="...">...</failure>
//                                     <-- individual assertion failures
//     </testcase>
//   </testsuite>
// </testsuites>

// Formats the given time in milliseconds as seconds.
std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
  ::std::stringstream ss;
  ss << (static_cast<double>(ms) * 1e-3);
  return ss.str();
}

// Converts seconds since the epoch to a broken-down local time in *out.
// Returns false if the conversion fails.  Wraps the platform-specific
// thread-safe variant of localtime().
static bool PortableLocaltime(time_t seconds, struct tm* out) {
#if defined(_MSC_VER)
  return localtime_s(out, &seconds) == 0;
#elif defined(__MINGW32__) || defined(__MINGW64__)
  // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
  // Windows' localtime(), which has a thread-local tm buffer.
  struct tm* tm_ptr = localtime(&seconds);  // NOLINT
  if (tm_ptr == NULL)
    return false;
  *out = *tm_ptr;
  return true;
#else
  return localtime_r(&seconds, out) != NULL;
#endif
}

// Converts the given epoch time in milliseconds to a date string in the ISO
// 8601 format, without the timezone information.  Returns an empty string
// if the local-time conversion fails.
std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
  struct tm time_struct;
  if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
    return "";
  // YYYY-MM-DDThh:mm:ss
  return StreamableToString(time_struct.tm_year + 1900) + "-" +
      String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
      String::FormatIntWidth2(time_struct.tm_mday) + "T" +
      String::FormatIntWidth2(time_struct.tm_hour) + ":" +
      String::FormatIntWidth2(time_struct.tm_min) + ":" +
      String::FormatIntWidth2(time_struct.tm_sec);
}

// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
// A CDATA section cannot contain the terminator "]]>", so each occurrence
// is split across two sections with the ">" escaped in between.
void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
                                                     const char* data) {
  const char* segment = data;
  *stream << "<![CDATA[";
  for (;;) {
    const char* const next_segment = strstr(segment, "]]>");
    if (next_segment != NULL) {
      stream->write(
          segment, static_cast<std::streamsize>(next_segment - segment));
      *stream << "]]>]]&gt;<![CDATA[";
      segment = next_segment + strlen("]]>");
    } else {
      *stream << segment;
      break;
    }
  }
  *stream << "]]>";
}

// Streams a single XML attribute (with a leading space) after checking,
// via GTEST_CHECK_, that `name` is among the attributes reserved for
// `element_name`.
void XmlUnitTestResultPrinter::OutputXmlAttribute(
    std::ostream* stream,
    const std::string& element_name,
    const std::string& name,
    const std::string& value) {
  const std::vector<std::string>& allowed_names =
      GetReservedAttributesForElement(element_name);

  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
                   allowed_names.end())
      << "Attribute " << name << " is not allowed for element <" << element_name
      << ">.";

  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
}

// Prints an XML representation of a TestInfo object.
// TODO(wan): There is also value in printing properties with the plain printer.
void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
                                                 const char* test_case_name,
                                                 const TestInfo& test_info) {
  const TestResult& result = *test_info.result();
  const std::string kTestcase = "testcase";

  *stream << "    <testcase";
  OutputXmlAttribute(stream, kTestcase, "name", test_info.name());

  if (test_info.value_param() != NULL) {
    OutputXmlAttribute(stream, kTestcase, "value_param",
                       test_info.value_param());
  }
  if (test_info.type_param() != NULL) {
    OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param());
  }

  OutputXmlAttribute(stream, kTestcase, "status",
                     test_info.should_run() ? "run" : "notrun");
  OutputXmlAttribute(stream, kTestcase, "time",
                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
  OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
  *stream << TestPropertiesAsXmlAttributes(result);

  int failures = 0;
  for (int i = 0; i < result.total_part_count(); ++i) {
    const TestPartResult& part = result.GetTestPartResult(i);
    if (part.failed()) {
      // The opening <testcase> tag is closed lazily, on the first failure.
      if (++failures == 1) {
        *stream << ">\n";
      }
      const string location = internal::FormatCompilerIndependentFileLocation(
          part.file_name(), part.line_number());
      const string summary = location + "\n" + part.summary();
      *stream << "      <failure message=\""
              << EscapeXmlAttribute(summary.c_str())
              << "\" type=\"\">";
      const string detail = location + "\n" + part.message();
      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
      *stream << "</failure>\n";
    }
  }

  // With no failures the element is self-closing; otherwise close the
  // element opened above.
  if (failures == 0)
    *stream << " />\n";
  else
    *stream << "    </testcase>\n";
}

// Prints an XML representation of a TestCase object
void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
                                                const TestCase& test_case) {
  const std::string kTestsuite = "testsuite";
  *stream << "  <" << kTestsuite;
  OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
  OutputXmlAttribute(stream, kTestsuite, "tests",
                     StreamableToString(test_case.reportable_test_count()));
  OutputXmlAttribute(stream, kTestsuite, "failures",
                     StreamableToString(test_case.failed_test_count()));
  OutputXmlAttribute(
      stream, kTestsuite, "disabled",
      StreamableToString(test_case.reportable_disabled_test_count()));
  OutputXmlAttribute(stream, kTestsuite, "errors", "0");
  OutputXmlAttribute(stream, kTestsuite, "time",
                     FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
  *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
          << ">\n";

  for (int i = 0; i < test_case.total_test_count(); ++i) {
    if (test_case.GetTestInfo(i)->is_reportable())
      OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
  }
  *stream << "  </" << kTestsuite << ">\n";
}

// Prints an XML summary of unit_test to output stream out.
void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
                                                const UnitTest& unit_test) {
  const std::string kTestsuites = "testsuites";

  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
  *stream << "<" << kTestsuites;

  OutputXmlAttribute(stream, kTestsuites, "tests",
                     StreamableToString(unit_test.reportable_test_count()));
  OutputXmlAttribute(stream, kTestsuites, "failures",
                     StreamableToString(unit_test.failed_test_count()));
  OutputXmlAttribute(
      stream, kTestsuites, "disabled",
      StreamableToString(unit_test.reportable_disabled_test_count()));
  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
  OutputXmlAttribute(
      stream, kTestsuites, "timestamp",
      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
  OutputXmlAttribute(stream, kTestsuites, "time",
                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));

  if (GTEST_FLAG(shuffle)) {
    OutputXmlAttribute(stream, kTestsuites, "random_seed",
                       StreamableToString(unit_test.random_seed()));
  }

  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());

  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
  *stream << ">\n";

  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
    if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
      PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
  }
  *stream << "</" << kTestsuites << ">\n";
}

// Produces a string representing the test properties in a result as space
// delimited XML attributes based on the property key="value" pairs.
std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
    const TestResult& result) {
  Message attributes;
  for (int i = 0; i < result.test_property_count(); ++i) {
    const TestProperty& property = result.GetTestProperty(i);
    attributes << " " << property.key() << "="
        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
  }
  return attributes.GetString();
}

// End XmlUnitTestResultPrinter

#if GTEST_CAN_STREAM_RESULTS_

// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
// replaces them by "%xx" where xx is their hexadecimal value. For
// example, replaces "=" with "%3D".  
This algorithm is O(strlen(str))
// in both time and space -- important as the input str may contain an
// arbitrarily long test failure message and stack trace.
string StreamingListener::UrlEncode(const char* str) {
  string result;
  result.reserve(strlen(str) + 1);
  for (char ch = *str; ch != '\0'; ch = *++str) {
    switch (ch) {
      case '%':
      case '=':
      case '&':
      case '\n':
        result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
        break;
      default:
        result.push_back(ch);
        break;
    }
  }
  return result;
}

// Opens a TCP connection to host_name_:port_num_, storing the connected
// socket in sockfd_.  On failure sockfd_ stays -1 and a warning is logged.
void StreamingListener::SocketWriter::MakeConnection() {
  GTEST_CHECK_(sockfd_ == -1)
      << "MakeConnection() can't be called when there is already a connection.";

  addrinfo hints;
  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
  hints.ai_socktype = SOCK_STREAM;
  addrinfo* servinfo = NULL;

  // Use the getaddrinfo() to get a linked list of IP addresses for
  // the given host name.
  const int error_num = getaddrinfo(
      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
  if (error_num != 0) {
    // On failure servinfo stays NULL, so the loop below is a no-op.
    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
                        << gai_strerror(error_num);
  }

  // Loop through all the results and connect to the first we can.
  for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
       cur_addr = cur_addr->ai_next) {
    sockfd_ = socket(
        cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
    if (sockfd_ != -1) {
      // Connect the client socket to the server socket.
      if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
        close(sockfd_);
        sockfd_ = -1;
      }
    }
  }

  freeaddrinfo(servinfo);  // all done with this structure

  if (sockfd_ == -1) {
    GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
                        << host_name_ << ":" << port_num_;
  }
}

// End of class StreamingListener
#endif  // GTEST_CAN_STREAM_RESULTS_

// Class ScopedTrace

// Pushes the given source file location and message onto a per-thread
// trace stack maintained by Google Test.
ScopedTrace::ScopedTrace(const char* file, int line, const Message& message)
    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
  TraceInfo trace;
  trace.file = file;
  trace.line = line;
  trace.message = message.GetString();

  UnitTest::GetInstance()->PushGTestTrace(trace);
}

// Pops the info pushed by the c'tor.
ScopedTrace::~ScopedTrace()
    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
  UnitTest::GetInstance()->PopGTestTrace();
}


// class OsStackTraceGetter

const char* const OsStackTraceGetterInterface::kElidedFramesMarker =
    "... " GTEST_NAME_ " internal frames ...";

// Stack traces are not implemented for this platform; always returns an
// empty string.
string OsStackTraceGetter::CurrentStackTrace(int /*max_depth*/,
                                             int /*skip_count*/) {
  return "";
}

void OsStackTraceGetter::UponLeavingGTest() {}

// A helper class that creates the premature-exit file in its
// constructor and deletes the file in its destructor.
class ScopedPrematureExitFile {
 public:
  explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
      : premature_exit_filepath_(premature_exit_filepath) {
    // If a path to the premature-exit file is specified...
    if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') {
      // create the file with a single "0" character in it.  I/O
      // errors are ignored as there's nothing better we can do and we
      // don't want to fail the test because of this.
      FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
      // FOpen returns NULL on failure; writing to or closing a NULL
      // FILE* is undefined behavior, so guard against it (the error
      // itself is still deliberately ignored).
      if (pfile != NULL) {
        fwrite("0", 1, 1, pfile);
        fclose(pfile);
      }
    }
  }

  ~ScopedPrematureExitFile() {
    if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') {
      remove(premature_exit_filepath_);
    }
  }

 private:
  const char* const premature_exit_filepath_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
};

}  // namespace internal

// class TestEventListeners

TestEventListeners::TestEventListeners()
    : repeater_(new internal::TestEventRepeater()),
      default_result_printer_(NULL),
      default_xml_generator_(NULL) {
}

TestEventListeners::~TestEventListeners() { delete repeater_; }

// Appends the given listener to the end of the list of event listeners
// maintained by the repeater.
void TestEventListeners::Append(TestEventListener* listener) {
  repeater_->Append(listener);
}

// Removes the given event listener from the list and returns it.  It then
// becomes the caller's responsibility to delete the listener. 
Returns\n// NULL if the listener is not found in the list.\nTestEventListener* TestEventListeners::Release(TestEventListener* listener) {\n  if (listener == default_result_printer_)\n    default_result_printer_ = NULL;\n  else if (listener == default_xml_generator_)\n    default_xml_generator_ = NULL;\n  return repeater_->Release(listener);\n}\n\n// Returns repeater that broadcasts the TestEventListener events to all\n// subscribers.\nTestEventListener* TestEventListeners::repeater() { return repeater_; }\n\n// Sets the default_result_printer attribute to the provided listener.\n// The listener is also added to the listener list and previous\n// default_result_printer is removed from it and deleted. The listener can\n// also be NULL in which case it will not be added to the list. Does\n// nothing if the previous and the current listener objects are the same.\nvoid TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {\n  if (default_result_printer_ != listener) {\n    // It is an error to pass this method a listener that is already in the\n    // list.\n    delete Release(default_result_printer_);\n    default_result_printer_ = listener;\n    if (listener != NULL)\n      Append(listener);\n  }\n}\n\n// Sets the default_xml_generator attribute to the provided listener.  The\n// listener is also added to the listener list and previous\n// default_xml_generator is removed from it and deleted. The listener can\n// also be NULL in which case it will not be added to the list. 
Does
// nothing if the previous and the current listener objects are the same.
void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
  if (default_xml_generator_ != listener) {
    // It is an error to pass this method a listener that is already in the
    // list.
    delete Release(default_xml_generator_);
    default_xml_generator_ = listener;
    if (listener != NULL)
      Append(listener);
  }
}

// Controls whether events will be forwarded by the repeater to the
// listeners in the list.
bool TestEventListeners::EventForwardingEnabled() const {
  return repeater_->forwarding_enabled();
}

// Permanently stops the repeater from forwarding events to the listeners
// in the list.
void TestEventListeners::SuppressEventForwarding() {
  repeater_->set_forwarding_enabled(false);
}

// class UnitTest

// Gets the singleton UnitTest object.  The first time this method is
// called, a UnitTest object is constructed and returned.  Consecutive
// calls will return the same object.
//
// We don't protect this under mutex_ as a user is not supposed to
// call this before main() starts, from which point on the return
// value will never change.
UnitTest* UnitTest::GetInstance() {
  // When compiled with MSVC 7.1 in optimized mode, destroying the
  // UnitTest object upon exiting the program messes up the exit code,
  // causing successful tests to appear failed.  We have to use a
  // different implementation in this case to bypass the compiler bug.
  // This implementation makes the compiler happy, at the cost of
  // leaking the UnitTest object.

  // CodeGear C++Builder insists on a public destructor for the
  // default implementation.  Use this implementation to keep good OO
  // design with private destructor.

#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
  // Heap-allocated and intentionally leaked; see the MSVC 7.1 note above.
  static UnitTest* const instance = new UnitTest;
  return instance;
#else
  // Ordinary function-local static; destroyed at program exit.
  static UnitTest instance;
  return &instance;
#endif  // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
}

// Gets the number of successful test cases.
int UnitTest::successful_test_case_count() const {
  return impl()->successful_test_case_count();
}

// Gets the number of failed test cases.
int UnitTest::failed_test_case_count() const {
  return impl()->failed_test_case_count();
}

// Gets the number of all test cases.
int UnitTest::total_test_case_count() const {
  return impl()->total_test_case_count();
}

// Gets the number of all test cases that contain at least one test
// that should run.
int UnitTest::test_case_to_run_count() const {
  return impl()->test_case_to_run_count();
}

// Gets the number of successful tests.
int UnitTest::successful_test_count() const {
  return impl()->successful_test_count();
}

// Gets the number of failed tests.
int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }

// Gets the number of disabled tests that will be reported in the XML report.
int UnitTest::reportable_disabled_test_count() const {
  return impl()->reportable_disabled_test_count();
}

// Gets the number of disabled tests.
int UnitTest::disabled_test_count() const {
  return impl()->disabled_test_count();
}

// Gets the number of tests to be printed in the XML report.
int UnitTest::reportable_test_count() const {
  return impl()->reportable_test_count();
}

// Gets the number of all tests.
int UnitTest::total_test_count() const { return impl()->total_test_count(); }

// Gets the number of tests that should run.
int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }

// Gets the time of the test program start, in ms 
from the start of the\n// UNIX epoch.\ninternal::TimeInMillis UnitTest::start_timestamp() const {\n    return impl()->start_timestamp();\n}\n\n// Gets the elapsed time, in milliseconds.\ninternal::TimeInMillis UnitTest::elapsed_time() const {\n  return impl()->elapsed_time();\n}\n\n// Returns true iff the unit test passed (i.e. all test cases passed).\nbool UnitTest::Passed() const { return impl()->Passed(); }\n\n// Returns true iff the unit test failed (i.e. some test case failed\n// or something outside of all tests failed).\nbool UnitTest::Failed() const { return impl()->Failed(); }\n\n// Gets the i-th test case among all the test cases. i can range from 0 to\n// total_test_case_count() - 1. If i is not in that range, returns NULL.\nconst TestCase* UnitTest::GetTestCase(int i) const {\n  return impl()->GetTestCase(i);\n}\n\n// Returns the TestResult containing information on test failures and\n// properties logged outside of individual test cases.\nconst TestResult& UnitTest::ad_hoc_test_result() const {\n  return *impl()->ad_hoc_test_result();\n}\n\n// Gets the i-th test case among all the test cases. i can range from 0 to\n// total_test_case_count() - 1. If i is not in that range, returns NULL.\nTestCase* UnitTest::GetMutableTestCase(int i) {\n  return impl()->GetMutableTestCase(i);\n}\n\n// Returns the list of event listeners that can be used to track events\n// inside Google Test.\nTestEventListeners& UnitTest::listeners() {\n  return *impl()->listeners();\n}\n\n// Registers and returns a global test environment.  When a test\n// program is run, all global test environments will be set-up in the\n// order they were registered.  
After all tests in the program have
// finished, all global test environments will be torn-down in the
// *reverse* order they were registered.
//
// The UnitTest object takes ownership of the given environment.
//
// We don't protect this under mutex_, as we only support calling it
// from the main thread.
Environment* UnitTest::AddEnvironment(Environment* env) {
  // A NULL environment is silently ignored.
  if (env == NULL) {
    return NULL;
  }

  impl_->environments().push_back(env);
  return env;
}

// Adds a TestPartResult to the current TestResult object.  All Google Test
// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
// this to report their results.  The user code should use the
// assertion macros instead of calling this directly.
void UnitTest::AddTestPartResult(
    TestPartResult::Type result_type,
    const char* file_name,
    int line_number,
    const std::string& message,
    const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
  Message msg;
  msg << message;

  internal::MutexLock lock(&mutex_);
  if (impl_->gtest_trace_stack().size() > 0) {
    msg << "\n" << GTEST_NAME_ << " trace:";

    // Traces are appended most-recent first by walking the stack from
    // the top down.
    for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
         i > 0; --i) {
      const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
      msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
          << " " << trace.message;
    }
  }

  if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
    msg << internal::kStackTraceMarker << os_stack_trace;
  }

  const TestPartResult result =
    TestPartResult(result_type, file_name, line_number,
                   msg.GetString().c_str());
  impl_->GetTestPartResultReporterForCurrentThread()->
      ReportTestPartResult(result);

  if (result_type != TestPartResult::kSuccess) {
    // gtest_break_on_failure takes precedence over
    // gtest_throw_on_failure.  This allows a user to set the latter
    // in the code (perhaps in order to use Google Test assertions
    // with another testing framework) and specify the former on the
    // command line for debugging.
    if (GTEST_FLAG(break_on_failure)) {
#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
      // Using DebugBreak on Windows allows gtest to still break into a debugger
      // when a failure happens and both the --gtest_break_on_failure and
      // the --gtest_catch_exceptions flags are specified.
      DebugBreak();
#else
      // Dereference NULL through a volatile pointer to prevent the compiler
      // from removing. We use this rather than abort() or __builtin_trap() for
      // portability: Symbian doesn't implement abort() well, and some debuggers
      // don't correctly trap abort().
      *static_cast<volatile int*>(NULL) = 1;
#endif  // GTEST_OS_WINDOWS
    } else if (GTEST_FLAG(throw_on_failure)) {
#if GTEST_HAS_EXCEPTIONS
      throw internal::GoogleTestFailureException(result);
#else
      // We cannot call abort() as it generates a pop-up in debug mode
      // that cannot be suppressed in VC 7.1 or below.
      exit(1);
#endif
    }
  }
}

// Adds a TestProperty to the current TestResult object when invoked from
// inside a test, to current TestCase's ad_hoc_test_result_ when invoked
// from SetUpTestCase or TearDownTestCase, or to the global property set
// when invoked elsewhere.  
If the result already contains a property with
// the same key, the value will be updated.
void UnitTest::RecordProperty(const std::string& key,
                              const std::string& value) {
  impl_->RecordProperty(TestProperty(key, value));
}

// Runs all tests in this UnitTest object and prints the result.
// Returns 0 if successful, or 1 otherwise.
//
// We don't protect this under mutex_, as we only support calling it
// from the main thread.
int UnitTest::Run() {
  const bool in_death_test_child_process =
      internal::GTEST_FLAG(internal_run_death_test).length() > 0;

  // Google Test implements this protocol for catching that a test
  // program exits before returning control to Google Test:
  //
  //   1. Upon start, Google Test creates a file whose absolute path
  //      is specified by the environment variable
  //      TEST_PREMATURE_EXIT_FILE.
  //   2. When Google Test has finished its work, it deletes the file.
  //
  // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before
  // running a Google-Test-based test program and check the existence
  // of the file at the end of the test execution to see if it has
  // exited prematurely.

  // If we are in the child process of a death test, don't
  // create/delete the premature exit file, as doing so is unnecessary
  // and will confuse the parent process.  Otherwise, create/delete
  // the file upon entering/leaving this function.  If the program
  // somehow exits before this function has a chance to return, the
  // premature-exit file will be left undeleted, causing a test runner
  // that understands the premature-exit-file protocol to report the
  // test as having failed.
  const internal::ScopedPrematureExitFile premature_exit_file(
      in_death_test_child_process ?
      NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE"));

  // Captures the value of GTEST_FLAG(catch_exceptions).  This value will be
  // used for the duration of the program.
  impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));

#if GTEST_HAS_SEH
  // Either the user wants Google Test to catch exceptions thrown by the
  // tests or this is executing in the context of death test child
  // process. In either case the user does not want to see pop-up dialogs
  // about crashes - they are expected.
  if (impl()->catch_exceptions() || in_death_test_child_process) {
# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
    // SetErrorMode doesn't exist on CE.
    SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
                 SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
# endif  // !GTEST_OS_WINDOWS_MOBILE

# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
    // Death test children can be terminated with _abort().  On Windows,
    // _abort() can show a dialog with a warning message.  This forces the
    // abort message to go to stderr instead.
    _set_error_mode(_OUT_TO_STDERR);
# endif

# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
    // In the debug version, Visual Studio pops up a separate dialog
    // offering a choice to debug the aborted program. We need to suppress
    // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
    // executed. Google Test will notify the user of any unexpected
    // failure via stderr.
    //
    // VC++ doesn't define _set_abort_behavior() prior to the version 8.0.
    // Users of prior VC versions shall suffer the agony and pain of
    // clicking through the countless debug dialogs.
    // TODO(vladl@google.com): find a way to suppress the abort dialog() in the
    // debug mode when compiled with VC 7.1 or lower.
    if (!GTEST_FLAG(break_on_failure))
      _set_abort_behavior(
          0x0,                                    // Clear the following flags:
          _WRITE_ABORT_MSG | _CALL_REPORTFAULT);  // pop-up window, core dump.
# endif
  }
#endif  // GTEST_HAS_SEH

  return internal::HandleExceptionsInMethodIfSupported(
      impl(),
      &internal::UnitTestImpl::RunAllTests,
      "auxiliary test code (environments or event listeners)") ? 0 : 1;
}

// Returns the working directory when the first TEST() or TEST_F() was
// executed.
const char* UnitTest::original_working_dir() const {
  return impl_->original_working_dir_.c_str();
}

// Returns the TestCase object for the test that's currently running,
// or NULL if no test is running.
const TestCase* UnitTest::current_test_case() const
    GTEST_LOCK_EXCLUDED_(mutex_) {
  internal::MutexLock lock(&mutex_);
  return impl_->current_test_case();
}

// Returns the TestInfo object for the test that's currently running,
// or NULL if no test is running.
const TestInfo* UnitTest::current_test_info() const
    GTEST_LOCK_EXCLUDED_(mutex_) {
  internal::MutexLock lock(&mutex_);
  return impl_->current_test_info();
}

// Returns the random seed used at the start of the current test run.
int UnitTest::random_seed() const { return impl_->random_seed(); }

#if GTEST_HAS_PARAM_TEST
// Returns ParameterizedTestCaseRegistry object used to keep track of
// value-parameterized tests and instantiate and register them.
internal::ParameterizedTestCaseRegistry&
    UnitTest::parameterized_test_registry()
        GTEST_LOCK_EXCLUDED_(mutex_) {
  return impl_->parameterized_test_registry();
}
#endif  // GTEST_HAS_PARAM_TEST

// Creates an empty UnitTest.
UnitTest::UnitTest() {
  impl_ = new internal::UnitTestImpl(this);
}

// Destructor of UnitTest.
UnitTest::~UnitTest() {
  delete impl_;
}

// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
// Google Test trace stack.
void UnitTest::PushGTestTrace(const internal::TraceInfo& trace)
    GTEST_LOCK_EXCLUDED_(mutex_) {
  internal::MutexLock lock(&mutex_);
  impl_->gtest_trace_stack().push_back(trace);
}

// Pops a trace from the per-thread Google Test trace stack.
void UnitTest::PopGTestTrace()
    GTEST_LOCK_EXCLUDED_(mutex_) {
  internal::MutexLock lock(&mutex_);
  impl_->gtest_trace_stack().pop_back();
}

namespace internal {

UnitTestImpl::UnitTestImpl(UnitTest* parent)
    : parent_(parent),
      GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */)
      default_global_test_part_result_reporter_(this),
      default_per_thread_test_part_result_reporter_(this),
      GTEST_DISABLE_MSC_WARNINGS_POP_()
      // NOTE(review): "repoter" is misspelled in the member declaration
      // elsewhere; renaming it requires touching the class definition too.
      global_test_part_result_repoter_(
          &default_global_test_part_result_reporter_),
      per_thread_test_part_result_reporter_(
          &default_per_thread_test_part_result_reporter_),
#if GTEST_HAS_PARAM_TEST
      parameterized_test_registry_(),
      parameterized_tests_registered_(false),
#endif  // GTEST_HAS_PARAM_TEST
      last_death_test_case_(-1),
      current_test_case_(NULL),
      current_test_info_(NULL),
      ad_hoc_test_result_(),
      os_stack_trace_getter_(NULL),
      post_flag_parse_init_performed_(false),
      random_seed_(0),  // Will be overridden by the flag before first use.
      random_(0),  // Will be reseeded before first use.
      start_timestamp_(0),
      elapsed_time_(0),
#if GTEST_HAS_DEATH_TEST
      death_test_factory_(new DefaultDeathTestFactory),
#endif
      // Will be overridden by the flag before first use.
      catch_exceptions_(false) {
  listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
}

UnitTestImpl::~UnitTestImpl() {
  // Deletes every TestCase.
  ForEach(test_cases_, internal::Delete<TestCase>);

  // Deletes every Environment.
  ForEach(environments_, internal::Delete<Environment>);

  delete os_stack_trace_getter_;
}

// Adds a TestProperty to the current TestResult object when invoked in a
// context of a test, to current test case's ad_hoc_test_result when invoke
// from SetUpTestCase/TearDownTestCase, or to the global property set
// otherwise.  If the result already contains a property with the same key,
// the value will be updated.
void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
  std::string xml_element;
  TestResult* test_result;  // TestResult appropriate for property recording.

  // Pick the XML element and TestResult matching the current scope:
  // a running test, a running test case, or the whole program.
  if (current_test_info_ != NULL) {
    xml_element = "testcase";
    test_result = &(current_test_info_->result_);
  } else if (current_test_case_ != NULL) {
    xml_element = "testsuite";
    test_result = &(current_test_case_->ad_hoc_test_result_);
  } else {
    xml_element = "testsuites";
    test_result = &ad_hoc_test_result_;
  }
  test_result->RecordProperty(xml_element, test_property);
}

#if GTEST_HAS_DEATH_TEST
// Disables event forwarding if the control is currently in a death test
// subprocess. Must not be called before InitGoogleTest.
void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
  if (internal_run_death_test_flag_.get() != NULL)
    listeners()->SuppressEventForwarding();
}
#endif  // GTEST_HAS_DEATH_TEST

// Initializes event listeners performing XML output as specified by
// UnitTestOptions. 
Must not be called before InitGoogleTest.\nvoid UnitTestImpl::ConfigureXmlOutput() {\n  const std::string& output_format = UnitTestOptions::GetOutputFormat();\n  if (output_format == \"xml\") {\n    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(\n        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));\n  } else if (output_format != \"\") {\n    printf(\"WARNING: unrecognized output format \\\"%s\\\" ignored.\\n\",\n           output_format.c_str());\n    fflush(stdout);\n  }\n}\n\n#if GTEST_CAN_STREAM_RESULTS_\n// Initializes event listeners for streaming test results in string form.\n// Must not be called before InitGoogleTest.\nvoid UnitTestImpl::ConfigureStreamingOutput() {\n  const std::string& target = GTEST_FLAG(stream_result_to);\n  if (!target.empty()) {\n    const size_t pos = target.find(':');\n    if (pos != std::string::npos) {\n      listeners()->Append(new StreamingListener(target.substr(0, pos),\n                                                target.substr(pos+1)));\n    } else {\n      printf(\"WARNING: unrecognized streaming target \\\"%s\\\" ignored.\\n\",\n             target.c_str());\n      fflush(stdout);\n    }\n  }\n}\n#endif  // GTEST_CAN_STREAM_RESULTS_\n\n// Performs initialization dependent upon flag values obtained in\n// ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to\n// ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest\n// this function is also called from RunAllTests.  
Since this function can be\n// called more than once, it has to be idempotent.\nvoid UnitTestImpl::PostFlagParsingInit() {\n  // Ensures that this function does not execute more than once.\n  if (!post_flag_parse_init_performed_) {\n    post_flag_parse_init_performed_ = true;\n\n#if defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)\n    // Register to send notifications about key process state changes.\n    listeners()->Append(new GTEST_CUSTOM_TEST_EVENT_LISTENER_());\n#endif  // defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)\n\n#if GTEST_HAS_DEATH_TEST\n    InitDeathTestSubprocessControlInfo();\n    SuppressTestEventsIfInSubprocess();\n#endif  // GTEST_HAS_DEATH_TEST\n\n    // Registers parameterized tests. This makes parameterized tests\n    // available to the UnitTest reflection API without running\n    // RUN_ALL_TESTS.\n    RegisterParameterizedTests();\n\n    // Configures listeners for XML output. This makes it possible for users\n    // to shut down the default XML output before invoking RUN_ALL_TESTS.\n    ConfigureXmlOutput();\n\n#if GTEST_CAN_STREAM_RESULTS_\n    // Configures listeners for streaming test results to the specified server.\n    ConfigureStreamingOutput();\n#endif  // GTEST_CAN_STREAM_RESULTS_\n  }\n}\n\n// A predicate that checks the name of a TestCase against a known\n// value.\n//\n// This is used for implementation of the UnitTest class only.  We put\n// it in the anonymous namespace to prevent polluting the outer\n// namespace.\n//\n// TestCaseNameIs is copyable.\nclass TestCaseNameIs {\n public:\n  // Constructor.\n  explicit TestCaseNameIs(const std::string& name)\n      : name_(name) {}\n\n  // Returns true iff the name of test_case matches name_.\n  bool operator()(const TestCase* test_case) const {\n    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;\n  }\n\n private:\n  std::string name_;\n};\n\n// Finds and returns a TestCase with the given name.  If one doesn't\n// exist, creates one and returns it.  
It's the CALLER'S\n// RESPONSIBILITY to ensure that this function is only called WHEN THE\n// TESTS ARE NOT SHUFFLED.\n//\n// Arguments:\n//\n//   test_case_name: name of the test case\n//   type_param:     the name of the test case's type parameter, or NULL if\n//                   this is not a typed or a type-parameterized test case.\n//   set_up_tc:      pointer to the function that sets up the test case\n//   tear_down_tc:   pointer to the function that tears down the test case\nTestCase* UnitTestImpl::GetTestCase(const char* test_case_name,\n                                    const char* type_param,\n                                    Test::SetUpTestCaseFunc set_up_tc,\n                                    Test::TearDownTestCaseFunc tear_down_tc) {\n  // Can we find a TestCase with the given name?\n  const std::vector<TestCase*>::const_iterator test_case =\n      std::find_if(test_cases_.begin(), test_cases_.end(),\n                   TestCaseNameIs(test_case_name));\n\n  if (test_case != test_cases_.end())\n    return *test_case;\n\n  // No.  Let's create one.\n  TestCase* const new_test_case =\n      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);\n\n  // Is this a death test case?\n  if (internal::UnitTestOptions::MatchesFilter(test_case_name,\n                                               kDeathTestCaseFilter)) {\n    // Yes.  Inserts the test case after the last death test case\n    // defined so far.  This only works when the test cases haven't\n    // been shuffled.  Otherwise we may end up running a death test\n    // after a non-death test.\n    ++last_death_test_case_;\n    test_cases_.insert(test_cases_.begin() + last_death_test_case_,\n                       new_test_case);\n  } else {\n    // No.  
Appends to the end of the list.\n    test_cases_.push_back(new_test_case);\n  }\n\n  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));\n  return new_test_case;\n}\n\n// Helpers for setting up / tearing down the given environment.  They\n// are for use in the ForEach() function.\nstatic void SetUpEnvironment(Environment* env) { env->SetUp(); }\nstatic void TearDownEnvironment(Environment* env) { env->TearDown(); }\n\n// Runs all tests in this UnitTest object, prints the result, and\n// returns true if all tests are successful.  If any exception is\n// thrown during a test, the test is considered to be failed, but the\n// rest of the tests will still be run.\n//\n// When parameterized tests are enabled, it expands and registers\n// parameterized tests first in RegisterParameterizedTests().\n// All other functions called from RunAllTests() may safely assume that\n// parameterized tests are ready to be counted and run.\nbool UnitTestImpl::RunAllTests() {\n  // Makes sure InitGoogleTest() was called.\n  if (!GTestIsInitialized()) {\n    printf(\"%s\",\n           \"\\nThis test program did NOT call ::testing::InitGoogleTest \"\n           \"before calling RUN_ALL_TESTS().  
Please fix it.\\n\");\n    return false;\n  }\n\n  // Do not run any test if the --help flag was specified.\n  if (g_help_flag)\n    return true;\n\n  // Repeats the call to the post-flag parsing initialization in case the\n  // user didn't call InitGoogleTest.\n  PostFlagParsingInit();\n\n  // Even if sharding is not on, test runners may want to use the\n  // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding\n  // protocol.\n  internal::WriteToShardStatusFileIfNeeded();\n\n  // True iff we are in a subprocess for running a thread-safe-style\n  // death test.\n  bool in_subprocess_for_death_test = false;\n\n#if GTEST_HAS_DEATH_TEST\n  in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);\n# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)\n  if (in_subprocess_for_death_test) {\n    GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_();\n  }\n# endif  // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)\n#endif  // GTEST_HAS_DEATH_TEST\n\n  const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,\n                                        in_subprocess_for_death_test);\n\n  // Compares the full test names with the filter to decide which\n  // tests to run.\n  const bool has_tests_to_run = FilterTests(should_shard\n                                              ? 
HONOR_SHARDING_PROTOCOL\n                                              : IGNORE_SHARDING_PROTOCOL) > 0;\n\n  // Lists the tests and exits if the --gtest_list_tests flag was specified.\n  if (GTEST_FLAG(list_tests)) {\n    // This must be called *after* FilterTests() has been called.\n    ListTestsMatchingFilter();\n    return true;\n  }\n\n  random_seed_ = GTEST_FLAG(shuffle) ?\n      GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;\n\n  // True iff at least one test has failed.\n  bool failed = false;\n\n  TestEventListener* repeater = listeners()->repeater();\n\n  start_timestamp_ = GetTimeInMillis();\n  repeater->OnTestProgramStart(*parent_);\n\n  // How many times to repeat the tests?  We don't want to repeat them\n  // when we are inside the subprocess of a death test.\n  const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);\n  // Repeats forever if the repeat count is negative.\n  const bool forever = repeat < 0;\n  for (int i = 0; forever || i != repeat; i++) {\n    // We want to preserve failures generated by ad-hoc test\n    // assertions executed before RUN_ALL_TESTS().\n    ClearNonAdHocTestResult();\n\n    const TimeInMillis start = GetTimeInMillis();\n\n    // Shuffles test cases and tests if requested.\n    if (has_tests_to_run && GTEST_FLAG(shuffle)) {\n      random()->Reseed(random_seed_);\n      // This should be done before calling OnTestIterationStart(),\n      // such that a test event listener can see the actual test order\n      // in the event.\n      ShuffleTests();\n    }\n\n    // Tells the unit test event listeners that the tests are about to start.\n    repeater->OnTestIterationStart(*parent_, i);\n\n    // Runs each test case if there is at least one test to run.\n    if (has_tests_to_run) {\n      // Sets up all environments beforehand.\n      repeater->OnEnvironmentsSetUpStart(*parent_);\n      ForEach(environments_, SetUpEnvironment);\n      repeater->OnEnvironmentsSetUpEnd(*parent_);\n\n      // Runs the tests 
only if there was no fatal failure during global\n      // set-up.\n      if (!Test::HasFatalFailure()) {\n        for (int test_index = 0; test_index < total_test_case_count();\n             test_index++) {\n          GetMutableTestCase(test_index)->Run();\n        }\n      }\n\n      // Tears down all environments in reverse order afterwards.\n      repeater->OnEnvironmentsTearDownStart(*parent_);\n      std::for_each(environments_.rbegin(), environments_.rend(),\n                    TearDownEnvironment);\n      repeater->OnEnvironmentsTearDownEnd(*parent_);\n    }\n\n    elapsed_time_ = GetTimeInMillis() - start;\n\n    // Tells the unit test event listener that the tests have just finished.\n    repeater->OnTestIterationEnd(*parent_, i);\n\n    // Gets the result and clears it.\n    if (!Passed()) {\n      failed = true;\n    }\n\n    // Restores the original test order after the iteration.  This\n    // allows the user to quickly repro a failure that happens in the\n    // N-th iteration without repeating the first (N - 1) iterations.\n    // This is not enclosed in \"if (GTEST_FLAG(shuffle)) { ... }\", in\n    // case the user somehow changes the value of the flag somewhere\n    // (it's always safe to unshuffle the tests).\n    UnshuffleTests();\n\n    if (GTEST_FLAG(shuffle)) {\n      // Picks a new random seed for each iteration.\n      random_seed_ = GetNextRandomSeed(random_seed_);\n    }\n  }\n\n  repeater->OnTestProgramEnd(*parent_);\n\n  return !failed;\n}\n\n// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file\n// if the variable is present. If a file already exists at this location, this\n// function will write over it. 
If the variable is present, but the file cannot\n// be created, prints an error and exits.\nvoid WriteToShardStatusFileIfNeeded() {\n  const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);\n  if (test_shard_file != NULL) {\n    FILE* const file = posix::FOpen(test_shard_file, \"w\");\n    if (file == NULL) {\n      ColoredPrintf(COLOR_RED,\n                    \"Could not write to the test shard status file \\\"%s\\\" \"\n                    \"specified by the %s environment variable.\\n\",\n                    test_shard_file, kTestShardStatusFile);\n      fflush(stdout);\n      exit(EXIT_FAILURE);\n    }\n    fclose(file);\n  }\n}\n\n// Checks whether sharding is enabled by examining the relevant\n// environment variable values. If the variables are present,\n// but inconsistent (i.e., shard_index >= total_shards), prints\n// an error and exits. If in_subprocess_for_death_test, sharding is\n// disabled because it must only be applied to the original test\n// process. 
Otherwise, we could filter out death tests we intended to execute.\nbool ShouldShard(const char* total_shards_env,\n                 const char* shard_index_env,\n                 bool in_subprocess_for_death_test) {\n  if (in_subprocess_for_death_test) {\n    return false;\n  }\n\n  const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);\n  const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);\n\n  if (total_shards == -1 && shard_index == -1) {\n    return false;\n  } else if (total_shards == -1 && shard_index != -1) {\n    const Message msg = Message()\n      << \"Invalid environment variables: you have \"\n      << kTestShardIndex << \" = \" << shard_index\n      << \", but have left \" << kTestTotalShards << \" unset.\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  } else if (total_shards != -1 && shard_index == -1) {\n    const Message msg = Message()\n      << \"Invalid environment variables: you have \"\n      << kTestTotalShards << \" = \" << total_shards\n      << \", but have left \" << kTestShardIndex << \" unset.\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  } else if (shard_index < 0 || shard_index >= total_shards) {\n    const Message msg = Message()\n      << \"Invalid environment variables: we require 0 <= \"\n      << kTestShardIndex << \" < \" << kTestTotalShards\n      << \", but you have \" << kTestShardIndex << \"=\" << shard_index\n      << \", \" << kTestTotalShards << \"=\" << total_shards << \".\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  }\n\n  return total_shards > 1;\n}\n\n// Parses the environment variable var as an Int32. If it is unset,\n// returns default_val. 
If it is not an Int32, prints an error\n// and aborts.\nInt32 Int32FromEnvOrDie(const char* var, Int32 default_val) {\n  const char* str_val = posix::GetEnv(var);\n  if (str_val == NULL) {\n    return default_val;\n  }\n\n  Int32 result;\n  if (!ParseInt32(Message() << \"The value of environment variable \" << var,\n                  str_val, &result)) {\n    exit(EXIT_FAILURE);\n  }\n  return result;\n}\n\n// Given the total number of shards, the shard index, and the test id,\n// returns true iff the test should be run on this shard. The test id is\n// some arbitrary but unique non-negative integer assigned to each test\n// method. Assumes that 0 <= shard_index < total_shards.\nbool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {\n  return (test_id % total_shards) == shard_index;\n}\n\n// Compares the name of each test with the user-specified filter to\n// decide whether the test should be run, then records the result in\n// each TestCase and TestInfo object.\n// If shard_tests == true, further filters tests based on sharding\n// variables in the environment - see\n// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.\n// Returns the number of tests that should run.\nint UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {\n  const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?\n      Int32FromEnvOrDie(kTestTotalShards, -1) : -1;\n  const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?\n      Int32FromEnvOrDie(kTestShardIndex, -1) : -1;\n\n  // num_runnable_tests are the number of tests that will\n  // run across all shards (i.e., match filter and are not disabled).\n  // num_selected_tests are the number of tests to be run on\n  // this shard.\n  int num_runnable_tests = 0;\n  int num_selected_tests = 0;\n  for (size_t i = 0; i < test_cases_.size(); i++) {\n    TestCase* const test_case = test_cases_[i];\n    const std::string &test_case_name = test_case->name();\n    
test_case->set_should_run(false);\n\n    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {\n      TestInfo* const test_info = test_case->test_info_list()[j];\n      const std::string test_name(test_info->name());\n      // A test is disabled if test case name or test name matches\n      // kDisableTestFilter.\n      const bool is_disabled =\n          internal::UnitTestOptions::MatchesFilter(test_case_name,\n                                                   kDisableTestFilter) ||\n          internal::UnitTestOptions::MatchesFilter(test_name,\n                                                   kDisableTestFilter);\n      test_info->is_disabled_ = is_disabled;\n\n      const bool matches_filter =\n          internal::UnitTestOptions::FilterMatchesTest(test_case_name,\n                                                       test_name);\n      test_info->matches_filter_ = matches_filter;\n\n      const bool is_runnable =\n          (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&\n          matches_filter;\n\n      const bool is_selected = is_runnable &&\n          (shard_tests == IGNORE_SHARDING_PROTOCOL ||\n           ShouldRunTestOnShard(total_shards, shard_index,\n                                num_runnable_tests));\n\n      num_runnable_tests += is_runnable;\n      num_selected_tests += is_selected;\n\n      test_info->should_run_ = is_selected;\n      test_case->set_should_run(test_case->should_run() || is_selected);\n    }\n  }\n  return num_selected_tests;\n}\n\n// Prints the given C-string on a single line by replacing all '\\n'\n// characters with string \"\\\\n\".  
If the output takes more than\n// max_length characters, only prints the first max_length characters\n// and \"...\".\nstatic void PrintOnOneLine(const char* str, int max_length) {\n  if (str != NULL) {\n    for (int i = 0; *str != '\\0'; ++str) {\n      if (i >= max_length) {\n        printf(\"...\");\n        break;\n      }\n      if (*str == '\\n') {\n        printf(\"\\\\n\");\n        i += 2;\n      } else {\n        printf(\"%c\", *str);\n        ++i;\n      }\n    }\n  }\n}\n\n// Prints the names of the tests matching the user-specified filter flag.\nvoid UnitTestImpl::ListTestsMatchingFilter() {\n  // Print at most this many characters for each type/value parameter.\n  const int kMaxParamLength = 250;\n\n  for (size_t i = 0; i < test_cases_.size(); i++) {\n    const TestCase* const test_case = test_cases_[i];\n    bool printed_test_case_name = false;\n\n    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {\n      const TestInfo* const test_info =\n          test_case->test_info_list()[j];\n      if (test_info->matches_filter_) {\n        if (!printed_test_case_name) {\n          printed_test_case_name = true;\n          printf(\"%s.\", test_case->name());\n          if (test_case->type_param() != NULL) {\n            printf(\"  # %s = \", kTypeParamLabel);\n            // We print the type parameter on a single line to make\n            // the output easy to parse by a program.\n            PrintOnOneLine(test_case->type_param(), kMaxParamLength);\n          }\n          printf(\"\\n\");\n        }\n        printf(\"  %s\", test_info->name());\n        if (test_info->value_param() != NULL) {\n          printf(\"  # %s = \", kValueParamLabel);\n          // We print the value parameter on a single line to make the\n          // output easy to parse by a program.\n          PrintOnOneLine(test_info->value_param(), kMaxParamLength);\n        }\n        printf(\"\\n\");\n      }\n    }\n  }\n  fflush(stdout);\n}\n\n// Sets the OS stack trace 
getter.\n//\n// Does nothing if the input and the current OS stack trace getter are\n// the same; otherwise, deletes the old getter and makes the input the\n// current getter.\nvoid UnitTestImpl::set_os_stack_trace_getter(\n    OsStackTraceGetterInterface* getter) {\n  if (os_stack_trace_getter_ != getter) {\n    delete os_stack_trace_getter_;\n    os_stack_trace_getter_ = getter;\n  }\n}\n\n// Returns the current OS stack trace getter if it is not NULL;\n// otherwise, creates an OsStackTraceGetter, makes it the current\n// getter, and returns it.\nOsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {\n  if (os_stack_trace_getter_ == NULL) {\n#ifdef GTEST_OS_STACK_TRACE_GETTER_\n    os_stack_trace_getter_ = new GTEST_OS_STACK_TRACE_GETTER_;\n#else\n    os_stack_trace_getter_ = new OsStackTraceGetter;\n#endif  // GTEST_OS_STACK_TRACE_GETTER_\n  }\n\n  return os_stack_trace_getter_;\n}\n\n// Returns the TestResult for the test that's currently running, or\n// the TestResult for the ad hoc test if no test is running.\nTestResult* UnitTestImpl::current_test_result() {\n  return current_test_info_ ?\n      &(current_test_info_->result_) : &ad_hoc_test_result_;\n}\n\n// Shuffles all test cases, and the tests within each test case,\n// making sure that death tests are still run first.\nvoid UnitTestImpl::ShuffleTests() {\n  // Shuffles the death test cases.\n  ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);\n\n  // Shuffles the non-death test cases.\n  ShuffleRange(random(), last_death_test_case_ + 1,\n               static_cast<int>(test_cases_.size()), &test_case_indices_);\n\n  // Shuffles the tests inside each test case.\n  for (size_t i = 0; i < test_cases_.size(); i++) {\n    test_cases_[i]->ShuffleTests(random());\n  }\n}\n\n// Restores the test cases and tests to their order before the first shuffle.\nvoid UnitTestImpl::UnshuffleTests() {\n  for (size_t i = 0; i < test_cases_.size(); i++) {\n    // Unshuffles the tests in 
each test case.\n    test_cases_[i]->UnshuffleTests();\n    // Resets the index of each test case.\n    test_case_indices_[i] = static_cast<int>(i);\n  }\n}\n\n// Returns the current OS stack trace as an std::string.\n//\n// The maximum number of stack frames to be included is specified by\n// the gtest_stack_trace_depth flag.  The skip_count parameter\n// specifies the number of top frames to be skipped, which doesn't\n// count against the number of frames to be included.\n//\n// For example, if Foo() calls Bar(), which in turn calls\n// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in\n// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.\nstd::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,\n                                            int skip_count) {\n  // We pass skip_count + 1 to skip this wrapper function in addition\n  // to what the user really wants to skip.\n  return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);\n}\n\n// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to\n// suppress unreachable code warnings.\nnamespace {\nclass ClassUniqueToAlwaysTrue {};\n}\n\nbool IsTrue(bool condition) { return condition; }\n\nbool AlwaysTrue() {\n#if GTEST_HAS_EXCEPTIONS\n  // This condition is always false so AlwaysTrue() never actually throws,\n  // but it makes the compiler think that it may throw.\n  if (IsTrue(false))\n    throw ClassUniqueToAlwaysTrue();\n#endif  // GTEST_HAS_EXCEPTIONS\n  return true;\n}\n\n// If *pstr starts with the given prefix, modifies *pstr to be right\n// past the prefix and returns true; otherwise leaves *pstr unchanged\n// and returns false.  None of pstr, *pstr, and prefix can be NULL.\nbool SkipPrefix(const char* prefix, const char** pstr) {\n  const size_t prefix_len = strlen(prefix);\n  if (strncmp(*pstr, prefix, prefix_len) == 0) {\n    *pstr += prefix_len;\n    return true;\n  }\n  return false;\n}\n\n// Parses a string as a command line flag. 
 The string should have\n// the format \"--flag=value\".  When def_optional is true, the \"=value\"\n// part can be omitted.\n//\n// Returns the value of the flag, or NULL if the parsing failed.\nconst char* ParseFlagValue(const char* str,\n                           const char* flag,\n                           bool def_optional) {\n  // str and flag must not be NULL.\n  if (str == NULL || flag == NULL) return NULL;\n\n  // The flag must start with \"--\" followed by GTEST_FLAG_PREFIX_.\n  const std::string flag_str = std::string(\"--\") + GTEST_FLAG_PREFIX_ + flag;\n  const size_t flag_len = flag_str.length();\n  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;\n\n  // Skips the flag name.\n  const char* flag_end = str + flag_len;\n\n  // When def_optional is true, it's OK to not have a \"=value\" part.\n  if (def_optional && (flag_end[0] == '\\0')) {\n    return flag_end;\n  }\n\n  // If def_optional is true and there are more characters after the\n  // flag name, or if def_optional is false, there must be a '=' after\n  // the flag name.\n  if (flag_end[0] != '=') return NULL;\n\n  // Returns the string after \"=\".\n  return flag_end + 1;\n}\n\n// Parses a string for a bool flag, in the form of either\n// \"--flag=value\" or \"--flag\".\n//\n// In the former case, the value is taken as true as long as it does\n// not start with '0', 'f', or 'F'.\n//\n// In the latter case, the value is taken as true.\n//\n// On success, stores the value of the flag in *value, and returns\n// true.  
On failure, returns false without changing *value.\nbool ParseBoolFlag(const char* str, const char* flag, bool* value) {\n  // Gets the value of the flag as a string.\n  const char* const value_str = ParseFlagValue(str, flag, true);\n\n  // Aborts if the parsing failed.\n  if (value_str == NULL) return false;\n\n  // Converts the string value to a bool.\n  *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');\n  return true;\n}\n\n// Parses a string for an Int32 flag, in the form of\n// \"--flag=value\".\n//\n// On success, stores the value of the flag in *value, and returns\n// true.  On failure, returns false without changing *value.\nbool ParseInt32Flag(const char* str, const char* flag, Int32* value) {\n  // Gets the value of the flag as a string.\n  const char* const value_str = ParseFlagValue(str, flag, false);\n\n  // Aborts if the parsing failed.\n  if (value_str == NULL) return false;\n\n  // Sets *value to the value of the flag.\n  return ParseInt32(Message() << \"The value of flag --\" << flag,\n                    value_str, value);\n}\n\n// Parses a string for a string flag, in the form of\n// \"--flag=value\".\n//\n// On success, stores the value of the flag in *value, and returns\n// true.  On failure, returns false without changing *value.\nbool ParseStringFlag(const char* str, const char* flag, std::string* value) {\n  // Gets the value of the flag as a string.\n  const char* const value_str = ParseFlagValue(str, flag, false);\n\n  // Aborts if the parsing failed.\n  if (value_str == NULL) return false;\n\n  // Sets *value to the value of the flag.\n  *value = value_str;\n  return true;\n}\n\n// Determines whether a string has a prefix that Google Test uses for its\n// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.\n// If Google Test detects that a command line flag has its prefix but is not\n// recognized, it will print its help message. 
Flags starting with\n// GTEST_INTERNAL_PREFIX_ followed by \"internal_\" are considered Google Test\n// internal flags and do not trigger the help message.\nstatic bool HasGoogleTestFlagPrefix(const char* str) {\n  return (SkipPrefix(\"--\", &str) ||\n          SkipPrefix(\"-\", &str) ||\n          SkipPrefix(\"/\", &str)) &&\n         !SkipPrefix(GTEST_FLAG_PREFIX_ \"internal_\", &str) &&\n         (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||\n          SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));\n}\n\n// Prints a string containing code-encoded text.  The following escape\n// sequences can be used in the string to control the text color:\n//\n//   @@    prints a single '@' character.\n//   @R    changes the color to red.\n//   @G    changes the color to green.\n//   @Y    changes the color to yellow.\n//   @D    changes to the default terminal text color.\n//\n// TODO(wan@google.com): Write tests for this once we add stdout\n// capturing to Google Test.\nstatic void PrintColorEncoded(const char* str) {\n  GTestColor color = COLOR_DEFAULT;  // The current color.\n\n  // Conceptually, we split the string into segments divided by escape\n  // sequences.  Then we print one segment at a time.  
At the end of\n  // each iteration, the str pointer advances to the beginning of the\n  // next segment.\n  for (;;) {\n    const char* p = strchr(str, '@');\n    if (p == NULL) {\n      ColoredPrintf(color, \"%s\", str);\n      return;\n    }\n\n    ColoredPrintf(color, \"%s\", std::string(str, p).c_str());\n\n    const char ch = p[1];\n    str = p + 2;\n    if (ch == '@') {\n      ColoredPrintf(color, \"@\");\n    } else if (ch == 'D') {\n      color = COLOR_DEFAULT;\n    } else if (ch == 'R') {\n      color = COLOR_RED;\n    } else if (ch == 'G') {\n      color = COLOR_GREEN;\n    } else if (ch == 'Y') {\n      color = COLOR_YELLOW;\n    } else {\n      --str;\n    }\n  }\n}\n\nstatic const char kColorEncodedHelpMessage[] =\n\"This program contains tests written using \" GTEST_NAME_ \". You can use the\\n\"\n\"following command line flags to control its behavior:\\n\"\n\"\\n\"\n\"Test Selection:\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"list_tests@D\\n\"\n\"      List the names of all tests instead of running them. The name of\\n\"\n\"      TEST(Foo, Bar) is \\\"Foo.Bar\\\".\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"filter=@YPOSTIVE_PATTERNS\"\n    \"[@G-@YNEGATIVE_PATTERNS]@D\\n\"\n\"      Run only the tests whose name matches one of the positive patterns but\\n\"\n\"      none of the negative patterns. '?' 
matches any single character; '*'\\n\"\n\"      matches any substring; ':' separates two patterns.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"also_run_disabled_tests@D\\n\"\n\"      Run all disabled tests too.\\n\"\n\"\\n\"\n\"Test Execution:\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"repeat=@Y[COUNT]@D\\n\"\n\"      Run the tests repeatedly; use a negative count to repeat forever.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"shuffle@D\\n\"\n\"      Randomize tests' orders on every iteration.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"random_seed=@Y[NUMBER]@D\\n\"\n\"      Random number seed to use for shuffling test orders (between 1 and\\n\"\n\"      99999, or 0 to use a seed based on the current time).\\n\"\n\"\\n\"\n\"Test Output:\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\\n\"\n\"      Enable/disable colored output. The default is @Gauto@D.\\n\"\n\"  -@G-\" GTEST_FLAG_PREFIX_ \"print_time=0@D\\n\"\n\"      Don't print the elapsed time of each test.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"output=xml@Y[@G:@YDIRECTORY_PATH@G\"\n    GTEST_PATH_SEP_ \"@Y|@G:@YFILE_PATH]@D\\n\"\n\"      Generate an XML report in the given directory or with the given file\\n\"\n\"      name. 
@YFILE_PATH@D defaults to @Gtest_details.xml@D.\\n\"\n#if GTEST_CAN_STREAM_RESULTS_\n\"  @G--\" GTEST_FLAG_PREFIX_ \"stream_result_to=@YHOST@G:@YPORT@D\\n\"\n\"      Stream test results to the given server.\\n\"\n#endif  // GTEST_CAN_STREAM_RESULTS_\n\"\\n\"\n\"Assertion Behavior:\\n\"\n#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS\n\"  @G--\" GTEST_FLAG_PREFIX_ \"death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\\n\"\n\"      Set the default death test style.\\n\"\n#endif  // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS\n\"  @G--\" GTEST_FLAG_PREFIX_ \"break_on_failure@D\\n\"\n\"      Turn assertion failures into debugger break-points.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"throw_on_failure@D\\n\"\n\"      Turn assertion failures into C++ exceptions.\\n\"\n\"  @G--\" GTEST_FLAG_PREFIX_ \"catch_exceptions=0@D\\n\"\n\"      Do not report exceptions as test failures. Instead, allow them\\n\"\n\"      to crash the program or throw a pop-up (on Windows).\\n\"\n\"\\n\"\n\"Except for @G--\" GTEST_FLAG_PREFIX_ \"list_tests@D, you can alternatively set \"\n    \"the corresponding\\n\"\n\"environment variable of a flag (all letters in upper-case). For example, to\\n\"\n\"disable colored text output, you can either specify @G--\" GTEST_FLAG_PREFIX_\n    \"color=no@D or set\\n\"\n\"the @G\" GTEST_FLAG_PREFIX_UPPER_ \"COLOR@D environment variable to @Gno@D.\\n\"\n\"\\n\"\n\"For more information, please read the \" GTEST_NAME_ \" documentation at\\n\"\n\"@G\" GTEST_PROJECT_URL_ \"@D. 
If you find a bug in \" GTEST_NAME_ \"\\n\"\n\"(not one in your own code or tests), please report it to\\n\"\n\"@G<\" GTEST_DEV_EMAIL_ \">@D.\\n\";\n\nbool ParseGoogleTestFlag(const char* const arg) {\n  return ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,\n                       &GTEST_FLAG(also_run_disabled_tests)) ||\n      ParseBoolFlag(arg, kBreakOnFailureFlag,\n                    &GTEST_FLAG(break_on_failure)) ||\n      ParseBoolFlag(arg, kCatchExceptionsFlag,\n                    &GTEST_FLAG(catch_exceptions)) ||\n      ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||\n      ParseStringFlag(arg, kDeathTestStyleFlag,\n                      &GTEST_FLAG(death_test_style)) ||\n      ParseBoolFlag(arg, kDeathTestUseFork,\n                    &GTEST_FLAG(death_test_use_fork)) ||\n      ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||\n      ParseStringFlag(arg, kInternalRunDeathTestFlag,\n                      &GTEST_FLAG(internal_run_death_test)) ||\n      ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||\n      ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||\n      ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||\n      ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||\n      ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||\n      ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||\n      ParseInt32Flag(arg, kStackTraceDepthFlag,\n                     &GTEST_FLAG(stack_trace_depth)) ||\n      ParseStringFlag(arg, kStreamResultToFlag,\n                      &GTEST_FLAG(stream_result_to)) ||\n      ParseBoolFlag(arg, kThrowOnFailureFlag,\n                    &GTEST_FLAG(throw_on_failure));\n}\n\n#if GTEST_USE_OWN_FLAGFILE_FLAG_\nvoid LoadFlagsFromFile(const std::string& path) {\n  FILE* flagfile = posix::FOpen(path.c_str(), \"r\");\n  if (!flagfile) {\n    fprintf(stderr,\n            \"Unable to open file \\\"%s\\\"\\n\",\n            GTEST_FLAG(flagfile).c_str());\n    
fflush(stderr);\n    exit(EXIT_FAILURE);\n  }\n  std::string contents(ReadEntireFile(flagfile));\n  posix::FClose(flagfile);\n  std::vector<std::string> lines;\n  SplitString(contents, '\\n', &lines);\n  for (size_t i = 0; i < lines.size(); ++i) {\n    if (lines[i].empty())\n      continue;\n    if (!ParseGoogleTestFlag(lines[i].c_str()))\n      g_help_flag = true;\n  }\n}\n#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_\n\n// Parses the command line for Google Test flags, without initializing\n// other parts of Google Test.  The type parameter CharType can be\n// instantiated to either char or wchar_t.\ntemplate <typename CharType>\nvoid ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {\n  for (int i = 1; i < *argc; i++) {\n    const std::string arg_string = StreamableToString(argv[i]);\n    const char* const arg = arg_string.c_str();\n\n    using internal::ParseBoolFlag;\n    using internal::ParseInt32Flag;\n    using internal::ParseStringFlag;\n\n    bool remove_flag = false;\n    if (ParseGoogleTestFlag(arg)) {\n      remove_flag = true;\n#if GTEST_USE_OWN_FLAGFILE_FLAG_\n    } else if (ParseStringFlag(arg, kFlagfileFlag, &GTEST_FLAG(flagfile))) {\n      LoadFlagsFromFile(GTEST_FLAG(flagfile));\n      remove_flag = true;\n#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_\n    } else if (arg_string == \"--help\" || arg_string == \"-h\" ||\n               arg_string == \"-?\" || arg_string == \"/?\" ||\n               HasGoogleTestFlagPrefix(arg)) {\n      // Both help flag and unrecognized Google Test flags (excluding\n      // internal ones) trigger help display.\n      g_help_flag = true;\n    }\n\n    if (remove_flag) {\n      // Shift the remainder of the argv list left by one.  Note\n      // that argv has (*argc + 1) elements, the last one always being\n      // NULL.  
The following loop moves the trailing NULL element as\n      // well.\n      for (int j = i; j != *argc; j++) {\n        argv[j] = argv[j + 1];\n      }\n\n      // Decrements the argument count.\n      (*argc)--;\n\n      // We also need to decrement the iterator as we just removed\n      // an element.\n      i--;\n    }\n  }\n\n  if (g_help_flag) {\n    // We print the help here instead of in RUN_ALL_TESTS(), as the\n    // latter may not be called at all if the user is using Google\n    // Test with another testing framework.\n    PrintColorEncoded(kColorEncodedHelpMessage);\n  }\n}\n\n// Parses the command line for Google Test flags, without initializing\n// other parts of Google Test.\nvoid ParseGoogleTestFlagsOnly(int* argc, char** argv) {\n  ParseGoogleTestFlagsOnlyImpl(argc, argv);\n}\nvoid ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {\n  ParseGoogleTestFlagsOnlyImpl(argc, argv);\n}\n\n// The internal implementation of InitGoogleTest().\n//\n// The type parameter CharType can be instantiated to either char or\n// wchar_t.\ntemplate <typename CharType>\nvoid InitGoogleTestImpl(int* argc, CharType** argv) {\n  // We don't want to run the initialization code twice.\n  if (GTestIsInitialized()) return;\n\n  if (*argc <= 0) return;\n\n  g_argvs.clear();\n  for (int i = 0; i != *argc; i++) {\n    g_argvs.push_back(StreamableToString(argv[i]));\n  }\n\n  ParseGoogleTestFlagsOnly(argc, argv);\n  GetUnitTestImpl()->PostFlagParsingInit();\n}\n\n}  // namespace internal\n\n// Initializes Google Test.  This must be called before calling\n// RUN_ALL_TESTS().  In particular, it parses a command line for the\n// flags that Google Test recognizes.  Whenever a Google Test flag is\n// seen, it is removed from argv, and *argc is decremented.\n//\n// No value is returned.  
Instead, the Google Test flag variables are\n// updated.\n//\n// Calling the function for the second time has no user-visible effect.\nvoid InitGoogleTest(int* argc, char** argv) {\n#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n  GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);\n#else  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n  internal::InitGoogleTestImpl(argc, argv);\n#endif  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n}\n\n// This overloaded version can be used in Windows programs compiled in\n// UNICODE mode.\nvoid InitGoogleTest(int* argc, wchar_t** argv) {\n#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n  GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);\n#else  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n  internal::InitGoogleTestImpl(argc, argv);\n#endif  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)\n}\n\n}  // namespace testing\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)\n//\n// This file implements death tests.\n\n\n#if GTEST_HAS_DEATH_TEST\n\n# if GTEST_OS_MAC\n#  include <crt_externs.h>\n# endif  // GTEST_OS_MAC\n\n# include <errno.h>\n# include <fcntl.h>\n# include <limits.h>\n\n# if GTEST_OS_LINUX\n#  include <signal.h>\n# endif  // GTEST_OS_LINUX\n\n# include <stdarg.h>\n\n# if GTEST_OS_WINDOWS\n#  include <windows.h>\n# else\n#  include <sys/mman.h>\n#  include <sys/wait.h>\n# endif  // GTEST_OS_WINDOWS\n\n# if GTEST_OS_QNX\n#  include <spawn.h>\n# endif  // GTEST_OS_QNX\n\n#endif  // GTEST_HAS_DEATH_TEST\n\n\n// Indicates that this translation unit is part of Google Test's\n// implementation.  It must come before gtest-internal-inl.h is\n// included, or there will be a compiler error.  
This trick exists to\n// prevent the accidental inclusion of gtest-internal-inl.h in the\n// user's code.\n#define GTEST_IMPLEMENTATION_ 1\n#undef GTEST_IMPLEMENTATION_\n\nnamespace testing {\n\n// Constants.\n\n// The default death test style.\nstatic const char kDefaultDeathTestStyle[] = \"fast\";\n\nGTEST_DEFINE_string_(\n    death_test_style,\n    internal::StringFromGTestEnv(\"death_test_style\", kDefaultDeathTestStyle),\n    \"Indicates how to run a death test in a forked child process: \"\n    \"\\\"threadsafe\\\" (child process re-executes the test binary \"\n    \"from the beginning, running only the specific death test) or \"\n    \"\\\"fast\\\" (child process runs the death test immediately \"\n    \"after forking).\");\n\nGTEST_DEFINE_bool_(\n    death_test_use_fork,\n    internal::BoolFromGTestEnv(\"death_test_use_fork\", false),\n    \"Instructs to use fork()/_exit() instead of clone() in death tests. \"\n    \"Ignored and always uses fork() on POSIX systems where clone() is not \"\n    \"implemented. Useful when running under valgrind or similar tools if \"\n    \"those do not support clone(). Valgrind 3.3.1 will just fail if \"\n    \"it sees an unsupported combination of clone() flags. \"\n    \"It is not recommended to use this flag w/o valgrind though it will \"\n    \"work in 99% of the cases. Once valgrind is fixed, this flag will \"\n    \"most likely be removed.\");\n\nnamespace internal {\nGTEST_DEFINE_string_(\n    internal_run_death_test, \"\",\n    \"Indicates the file, line number, temporal index of \"\n    \"the single death test to run, and a file descriptor to \"\n    \"which a success code may be sent, all separated by \"\n    \"the '|' characters.  This flag is specified if and only if the current \"\n    \"process is a sub-process launched for running a thread-safe \"\n    \"death test.  FOR INTERNAL USE ONLY.\");\n}  // namespace internal\n\n#if GTEST_HAS_DEATH_TEST\n\nnamespace internal {\n\n// Valid only for fast death tests. 
Indicates the code is running in the\n// child process of a fast style death test.\n# if !GTEST_OS_WINDOWS\nstatic bool g_in_fast_death_test_child = false;\n# endif\n\n// Returns a Boolean value indicating whether the caller is currently\n// executing in the context of the death test child process.  Tools such as\n// Valgrind heap checkers may need this to modify their behavior in death\n// tests.  IMPORTANT: This is an internal utility.  Using it may break the\n// implementation of death tests.  User code MUST NOT use it.\nbool InDeathTestChild() {\n# if GTEST_OS_WINDOWS\n\n  // On Windows, death tests are thread-safe regardless of the value of the\n  // death_test_style flag.\n  return !GTEST_FLAG(internal_run_death_test).empty();\n\n# else\n\n  if (GTEST_FLAG(death_test_style) == \"threadsafe\")\n    return !GTEST_FLAG(internal_run_death_test).empty();\n  else\n    return g_in_fast_death_test_child;\n#endif\n}\n\n}  // namespace internal\n\n// ExitedWithCode constructor.\nExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {\n}\n\n// ExitedWithCode function-call operator.\nbool ExitedWithCode::operator()(int exit_status) const {\n# if GTEST_OS_WINDOWS\n\n  return exit_status == exit_code_;\n\n# else\n\n  return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;\n\n# endif  // GTEST_OS_WINDOWS\n}\n\n# if !GTEST_OS_WINDOWS\n// KilledBySignal constructor.\nKilledBySignal::KilledBySignal(int signum) : signum_(signum) {\n}\n\n// KilledBySignal function-call operator.\nbool KilledBySignal::operator()(int exit_status) const {\n#  if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)\n  {\n    bool result;\n    if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) {\n      return result;\n    }\n  }\n#  endif  // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)\n  return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;\n}\n# endif  // !GTEST_OS_WINDOWS\n\nnamespace internal {\n\n// Utilities needed for death tests.\n\n// 
Generates a textual description of a given exit code, in the format\n// specified by wait(2).\nstatic std::string ExitSummary(int exit_code) {\n  Message m;\n\n# if GTEST_OS_WINDOWS\n\n  m << \"Exited with exit status \" << exit_code;\n\n# else\n\n  if (WIFEXITED(exit_code)) {\n    m << \"Exited with exit status \" << WEXITSTATUS(exit_code);\n  } else if (WIFSIGNALED(exit_code)) {\n    m << \"Terminated by signal \" << WTERMSIG(exit_code);\n  }\n#  ifdef WCOREDUMP\n  if (WCOREDUMP(exit_code)) {\n    m << \" (core dumped)\";\n  }\n#  endif\n# endif  // GTEST_OS_WINDOWS\n\n  return m.GetString();\n}\n\n// Returns true if exit_status describes a process that was terminated\n// by a signal, or exited normally with a nonzero exit code.\nbool ExitedUnsuccessfully(int exit_status) {\n  return !ExitedWithCode(0)(exit_status);\n}\n\n# if !GTEST_OS_WINDOWS\n// Generates a textual failure message when a death test finds more than\n// one thread running, or cannot determine the number of threads, prior\n// to executing the given statement.  It is the responsibility of the\n// caller not to pass a thread_count of 1.\nstatic std::string DeathTestThreadWarning(size_t thread_count) {\n  Message msg;\n  msg << \"Death tests use fork(), which is unsafe particularly\"\n      << \" in a threaded context. For this test, \" << GTEST_NAME_ << \" \";\n  if (thread_count == 0)\n    msg << \"couldn't detect the number of threads.\";\n  else\n    msg << \"detected \" << thread_count << \" threads.\";\n  return msg.GetString();\n}\n# endif  // !GTEST_OS_WINDOWS\n\n// Flag characters for reporting a death test that did not die.\nstatic const char kDeathTestLived = 'L';\nstatic const char kDeathTestReturned = 'R';\nstatic const char kDeathTestThrew = 'T';\nstatic const char kDeathTestInternalError = 'I';\n\n// An enumeration describing all of the possible ways that a death test can\n// conclude.  
DIED means that the process died while executing the test\n// code; LIVED means that process lived beyond the end of the test code;\n// RETURNED means that the test statement attempted to execute a return\n// statement, which is not allowed; THREW means that the test statement\n// returned control by throwing an exception.  IN_PROGRESS means the test\n// has not yet concluded.\n// TODO(vladl@google.com): Unify names and possibly values for\n// AbortReason, DeathTestOutcome, and flag characters above.\nenum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };\n\n// Routine for aborting the program which is safe to call from an\n// exec-style death test child process, in which case the error\n// message is propagated back to the parent process.  Otherwise, the\n// message is simply printed to stderr.  In either case, the program\n// then exits with status 1.\nvoid DeathTestAbort(const std::string& message) {\n  // On a POSIX system, this function may be called from a threadsafe-style\n  // death test child process, which operates on a very small stack.  
Use\n  // the heap for any additional non-minuscule memory requirements.\n  const InternalRunDeathTestFlag* const flag =\n      GetUnitTestImpl()->internal_run_death_test_flag();\n  if (flag != NULL) {\n    FILE* parent = posix::FDOpen(flag->write_fd(), \"w\");\n    fputc(kDeathTestInternalError, parent);\n    fprintf(parent, \"%s\", message.c_str());\n    fflush(parent);\n    _exit(1);\n  } else {\n    fprintf(stderr, \"%s\", message.c_str());\n    fflush(stderr);\n    posix::Abort();\n  }\n}\n\n// A replacement for CHECK that calls DeathTestAbort if the assertion\n// fails.\n# define GTEST_DEATH_TEST_CHECK_(expression) \\\n  do { \\\n    if (!::testing::internal::IsTrue(expression)) { \\\n      DeathTestAbort( \\\n          ::std::string(\"CHECK failed: File \") + __FILE__ +  \", line \" \\\n          + ::testing::internal::StreamableToString(__LINE__) + \": \" \\\n          + #expression); \\\n    } \\\n  } while (::testing::internal::AlwaysFalse())\n\n// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for\n// evaluating any system call that fulfills two conditions: it must return\n// -1 on failure, and set errno to EINTR when it is interrupted and\n// should be tried again.  The macro expands to a loop that repeatedly\n// evaluates the expression as long as it evaluates to -1 and sets\n// errno to EINTR.  
If the expression evaluates to -1 but errno is\n// something other than EINTR, DeathTestAbort is called.\n# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \\\n  do { \\\n    int gtest_retval; \\\n    do { \\\n      gtest_retval = (expression); \\\n    } while (gtest_retval == -1 && errno == EINTR); \\\n    if (gtest_retval == -1) { \\\n      DeathTestAbort( \\\n          ::std::string(\"CHECK failed: File \") + __FILE__ + \", line \" \\\n          + ::testing::internal::StreamableToString(__LINE__) + \": \" \\\n          + #expression + \" != -1\"); \\\n    } \\\n  } while (::testing::internal::AlwaysFalse())\n\n// Returns the message describing the last system error in errno.\nstd::string GetLastErrnoDescription() {\n    return errno == 0 ? \"\" : posix::StrError(errno);\n}\n\n// This is called from a death test parent process to read a failure\n// message from the death test child process and log it with the FATAL\n// severity. On Windows, the message is read from a pipe handle. On other\n// platforms, it is read from a file descriptor.\nstatic void FailFromInternalError(int fd) {\n  Message error;\n  char buffer[256];\n  int num_read;\n\n  do {\n    while ((num_read = posix::Read(fd, buffer, 255)) > 0) {\n      buffer[num_read] = '\\0';\n      error << buffer;\n    }\n  } while (num_read == -1 && errno == EINTR);\n\n  if (num_read == 0) {\n    GTEST_LOG_(FATAL) << error.GetString();\n  } else {\n    const int last_error = errno;\n    GTEST_LOG_(FATAL) << \"Error while reading death test internal: \"\n                      << GetLastErrnoDescription() << \" [\" << last_error << \"]\";\n  }\n}\n\n// Death test constructor.  
Increments the running death test count\n// for the current test.\nDeathTest::DeathTest() {\n  TestInfo* const info = GetUnitTestImpl()->current_test_info();\n  if (info == NULL) {\n    DeathTestAbort(\"Cannot run a death test outside of a TEST or \"\n                   \"TEST_F construct\");\n  }\n}\n\n// Creates and returns a death test by dispatching to the current\n// death test factory.\nbool DeathTest::Create(const char* statement, const RE* regex,\n                       const char* file, int line, DeathTest** test) {\n  return GetUnitTestImpl()->death_test_factory()->Create(\n      statement, regex, file, line, test);\n}\n\nconst char* DeathTest::LastMessage() {\n  return last_death_test_message_.c_str();\n}\n\nvoid DeathTest::set_last_death_test_message(const std::string& message) {\n  last_death_test_message_ = message;\n}\n\nstd::string DeathTest::last_death_test_message_;\n\n// Provides cross platform implementation for some death functionality.\nclass DeathTestImpl : public DeathTest {\n protected:\n  DeathTestImpl(const char* a_statement, const RE* a_regex)\n      : statement_(a_statement),\n        regex_(a_regex),\n        spawned_(false),\n        status_(-1),\n        outcome_(IN_PROGRESS),\n        read_fd_(-1),\n        write_fd_(-1) {}\n\n  // read_fd_ is expected to be closed and cleared by a derived class.\n  ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }\n\n  void Abort(AbortReason reason);\n  virtual bool Passed(bool status_ok);\n\n  const char* statement() const { return statement_; }\n  const RE* regex() const { return regex_; }\n  bool spawned() const { return spawned_; }\n  void set_spawned(bool is_spawned) { spawned_ = is_spawned; }\n  int status() const { return status_; }\n  void set_status(int a_status) { status_ = a_status; }\n  DeathTestOutcome outcome() const { return outcome_; }\n  void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }\n  int read_fd() const { return read_fd_; }\n  void 
set_read_fd(int fd) { read_fd_ = fd; }\n  int write_fd() const { return write_fd_; }\n  void set_write_fd(int fd) { write_fd_ = fd; }\n\n  // Called in the parent process only. Reads the result code of the death\n  // test child process via a pipe, interprets it to set the outcome_\n  // member, and closes read_fd_.  Outputs diagnostics and terminates in\n  // case of unexpected codes.\n  void ReadAndInterpretStatusByte();\n\n private:\n  // The textual content of the code this object is testing.  This class\n  // doesn't own this string and should not attempt to delete it.\n  const char* const statement_;\n  // The regular expression which test output must match.  DeathTestImpl\n  // doesn't own this object and should not attempt to delete it.\n  const RE* const regex_;\n  // True if the death test child process has been successfully spawned.\n  bool spawned_;\n  // The exit status of the child process.\n  int status_;\n  // How the death test concluded.\n  DeathTestOutcome outcome_;\n  // Descriptor to the read end of the pipe to the child process.  It is\n  // always -1 in the child process.  The child keeps its write end of the\n  // pipe in write_fd_.\n  int read_fd_;\n  // Descriptor to the child's write end of the pipe to the parent process.\n  // It is always -1 in the parent process.  The parent keeps its end of the\n  // pipe in read_fd_.\n  int write_fd_;\n};\n\n// Called in the parent process only. Reads the result code of the death\n// test child process via a pipe, interprets it to set the outcome_\n// member, and closes read_fd_.  
Outputs diagnostics and terminates in\n// case of unexpected codes.\nvoid DeathTestImpl::ReadAndInterpretStatusByte() {\n  char flag;\n  int bytes_read;\n\n  // The read() here blocks until data is available (signifying the\n  // failure of the death test) or until the pipe is closed (signifying\n  // its success), so it's okay to call this in the parent before\n  // the child process has exited.\n  do {\n    bytes_read = posix::Read(read_fd(), &flag, 1);\n  } while (bytes_read == -1 && errno == EINTR);\n\n  if (bytes_read == 0) {\n    set_outcome(DIED);\n  } else if (bytes_read == 1) {\n    switch (flag) {\n      case kDeathTestReturned:\n        set_outcome(RETURNED);\n        break;\n      case kDeathTestThrew:\n        set_outcome(THREW);\n        break;\n      case kDeathTestLived:\n        set_outcome(LIVED);\n        break;\n      case kDeathTestInternalError:\n        FailFromInternalError(read_fd());  // Does not return.\n        break;\n      default:\n        GTEST_LOG_(FATAL) << \"Death test child process reported \"\n                          << \"unexpected status byte (\"\n                          << static_cast<unsigned int>(flag) << \")\";\n    }\n  } else {\n    GTEST_LOG_(FATAL) << \"Read from death test child process failed: \"\n                      << GetLastErrnoDescription();\n  }\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));\n  set_read_fd(-1);\n}\n\n// Signals that the death test code which should have exited, didn't.\n// Should be called only in a death test child process.\n// Writes a status byte to the child's status file descriptor, then\n// calls _exit(1).\nvoid DeathTestImpl::Abort(AbortReason reason) {\n  // The parent process considers the death test to be a failure if\n  // it finds any data in our pipe.  So, here we write a single flag byte\n  // to the pipe, then exit.\n  const char status_ch =\n      reason == TEST_DID_NOT_DIE ? kDeathTestLived :\n      reason == TEST_THREW_EXCEPTION ? 
kDeathTestThrew : kDeathTestReturned;\n\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));\n  // We are leaking the descriptor here because on some platforms (i.e.,\n  // when built as Windows DLL), destructors of global objects will still\n  // run after calling _exit(). On such systems, write_fd_ will be\n  // indirectly closed from the destructor of UnitTestImpl, causing double\n  // close if it is also closed here. On debug configurations, double close\n  // may assert. As there are no in-process buffers to flush here, we are\n  // relying on the OS to close the descriptor after the process terminates\n  // when the destructors are not run.\n  _exit(1);  // Exits w/o any normal exit hooks (we were supposed to crash)\n}\n\n// Returns an indented copy of stderr output for a death test.\n// This makes distinguishing death test output lines from regular log lines\n// much easier.\nstatic ::std::string FormatDeathTestOutput(const ::std::string& output) {\n  ::std::string ret;\n  for (size_t at = 0; ; ) {\n    const size_t line_end = output.find('\\n', at);\n    ret += \"[  DEATH   ] \";\n    if (line_end == ::std::string::npos) {\n      ret += output.substr(at);\n      break;\n    }\n    ret += output.substr(at, line_end + 1 - at);\n    at = line_end + 1;\n  }\n  return ret;\n}\n\n// Assesses the success or failure of a death test, using both private\n// members which have previously been set, and one argument:\n//\n// Private data members:\n//   outcome:  An enumeration describing how the death test\n//             concluded: DIED, LIVED, THREW, or RETURNED.  The death test\n//             fails in the latter three cases.\n//   status:   The exit status of the child process. On *nix, it is in the\n//             in the format specified by wait(2). 
On Windows, this is the\n//             value supplied to the ExitProcess() API or a numeric code\n//             of the exception that terminated the program.\n//   regex:    A regular expression object to be applied to\n//             the test's captured standard error output; the death test\n//             fails if it does not match.\n//\n// Argument:\n//   status_ok: true if exit_status is acceptable in the context of\n//              this particular death test, which fails if it is false\n//\n// Returns true iff all of the above conditions are met.  Otherwise, the\n// first failing condition, in the order given above, is the one that is\n// reported. Also sets the last death test message string.\nbool DeathTestImpl::Passed(bool status_ok) {\n  if (!spawned())\n    return false;\n\n  const std::string error_message = GetCapturedStderr();\n\n  bool success = false;\n  Message buffer;\n\n  buffer << \"Death test: \" << statement() << \"\\n\";\n  switch (outcome()) {\n    case LIVED:\n      buffer << \"    Result: failed to die.\\n\"\n             << \" Error msg:\\n\" << FormatDeathTestOutput(error_message);\n      break;\n    case THREW:\n      buffer << \"    Result: threw an exception.\\n\"\n             << \" Error msg:\\n\" << FormatDeathTestOutput(error_message);\n      break;\n    case RETURNED:\n      buffer << \"    Result: illegal return in test statement.\\n\"\n             << \" Error msg:\\n\" << FormatDeathTestOutput(error_message);\n      break;\n    case DIED:\n      if (status_ok) {\n        const bool matched = RE::PartialMatch(error_message.c_str(), *regex());\n        if (matched) {\n          success = true;\n        } else {\n          buffer << \"    Result: died but not with expected error.\\n\"\n                 << \"  Expected: \" << regex()->pattern() << \"\\n\"\n                 << \"Actual msg:\\n\" << FormatDeathTestOutput(error_message);\n        }\n      } else {\n        buffer << \"    Result: died but not with expected exit 
code:\\n\"\n               << \"            \" << ExitSummary(status()) << \"\\n\"\n               << \"Actual msg:\\n\" << FormatDeathTestOutput(error_message);\n      }\n      break;\n    case IN_PROGRESS:\n    default:\n      GTEST_LOG_(FATAL)\n          << \"DeathTest::Passed somehow called before conclusion of test\";\n  }\n\n  DeathTest::set_last_death_test_message(buffer.GetString());\n  return success;\n}\n\n# if GTEST_OS_WINDOWS\n// WindowsDeathTest implements death tests on Windows. Due to the\n// specifics of starting new processes on Windows, death tests there are\n// always threadsafe, and Google Test considers the\n// --gtest_death_test_style=fast setting to be equivalent to\n// --gtest_death_test_style=threadsafe there.\n//\n// A few implementation notes:  Like the Linux version, the Windows\n// implementation uses pipes for child-to-parent communication. But due to\n// the specifics of pipes on Windows, some extra steps are required:\n//\n// 1. The parent creates a communication pipe and stores handles to both\n//    ends of it.\n// 2. The parent starts the child and provides it with the information\n//    necessary to acquire the handle to the write end of the pipe.\n// 3. The child acquires the write end of the pipe and signals the parent\n//    using a Windows event.\n// 4. Now the parent can release the write end of the pipe on its side. If\n//    this is done before step 3, the object's reference count goes down to\n//    0 and it is destroyed, preventing the child from acquiring it. The\n//    parent now has to release it, or read operations on the read end of\n//    the pipe will not return when the child terminates.\n// 5. 
The parent reads child's output through the pipe (outcome code and\n//    any possible error messages) from the pipe, and its stderr and then\n//    determines whether to fail the test.\n//\n// Note: to distinguish Win32 API calls from the local method and function\n// calls, the former are explicitly resolved in the global namespace.\n//\nclass WindowsDeathTest : public DeathTestImpl {\n public:\n  WindowsDeathTest(const char* a_statement,\n                   const RE* a_regex,\n                   const char* file,\n                   int line)\n      : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}\n\n  // All of these virtual functions are inherited from DeathTest.\n  virtual int Wait();\n  virtual TestRole AssumeRole();\n\n private:\n  // The name of the file in which the death test is located.\n  const char* const file_;\n  // The line number on which the death test is located.\n  const int line_;\n  // Handle to the write end of the pipe to the child process.\n  AutoHandle write_handle_;\n  // Child process handle.\n  AutoHandle child_handle_;\n  // Event the child process uses to signal the parent that it has\n  // acquired the handle to the write end of the pipe. After seeing this\n  // event the parent can release its own handles to make sure its\n  // ReadFile() calls return when the child terminates.\n  AutoHandle event_handle_;\n};\n\n// Waits for the child in a death test to exit, returning its exit\n// status, or 0 if no child process exists.  
As a side effect, sets the
// outcome data member.
int WindowsDeathTest::Wait() {
  if (!spawned())
    return 0;

  // Wait until the child either signals that it has acquired the write end
  // of the pipe or it dies.
  const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
  switch (::WaitForMultipleObjects(2,
                                   wait_handles,
                                   FALSE,  // Waits for any of the handles.
                                   INFINITE)) {
    case WAIT_OBJECT_0:
    case WAIT_OBJECT_0 + 1:
      break;
    default:
      GTEST_DEATH_TEST_CHECK_(false);  // Should not get here.
  }

  // The child has acquired the write end of the pipe or exited.
  // We release the handle on our side and continue.  (If we kept our copy
  // of the write end open, ReadFile on the read end would never report EOF.)
  write_handle_.Reset();
  event_handle_.Reset();

  ReadAndInterpretStatusByte();

  // Waits for the child process to exit if it hasn't already. This
  // returns immediately if the child has already exited, regardless of
  // whether previous calls to WaitForMultipleObjects synchronized on this
  // handle or not.
  GTEST_DEATH_TEST_CHECK_(
      WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
                                             INFINITE));
  DWORD status_code;
  GTEST_DEATH_TEST_CHECK_(
      ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
  child_handle_.Reset();
  set_status(static_cast<int>(status_code));
  return status();
}

// The AssumeRole process for a Windows death test.  It creates a child
// process with the same executable as the current process to run the
// death test.  
The child process is given the --gtest_filter and
// --gtest_internal_run_death_test flags such that it knows to run the
// current death test only.
DeathTest::TestRole WindowsDeathTest::AssumeRole() {
  const UnitTestImpl* const impl = GetUnitTestImpl();
  const InternalRunDeathTestFlag* const flag =
      impl->internal_run_death_test_flag();
  const TestInfo* const info = impl->current_test_info();
  const int death_test_index = info->result()->death_test_count();

  if (flag != NULL) {
    // ParseInternalRunDeathTestFlag() has performed all the necessary
    // processing.
    set_write_fd(flag->write_fd());
    return EXECUTE_TEST;
  }

  // WindowsDeathTest uses an anonymous pipe to communicate results of
  // a death test.
  SECURITY_ATTRIBUTES handles_are_inheritable = {
    sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
  HANDLE read_handle, write_handle;
  GTEST_DEATH_TEST_CHECK_(
      ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
                   0)  // Default buffer size.
      != FALSE);
  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
                                O_RDONLY));
  write_handle_.Reset(write_handle);
  event_handle_.Reset(::CreateEvent(
      &handles_are_inheritable,
      TRUE,    // Manual-reset event: it stays signaled until explicitly
               // reset (a TRUE bManualReset does NOT auto-reset).
      FALSE,   // The initial state is non-signalled.
      NULL));  // The event is unnamed.
  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
  const std::string filter_flag =
      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
      info->test_case_name() + "." + info->name();
  const std::string internal_flag =
      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
      "=" + file_ + "|" + StreamableToString(line_) + "|" +
      StreamableToString(death_test_index) + "|" +
      StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
      // size_t has the same width as pointers on both 32-bit and 64-bit
      // Windows platforms.
      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
      "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
      "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));

  char executable_path[_MAX_PATH + 1];  // NOLINT
  GTEST_DEATH_TEST_CHECK_(
      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
                                            executable_path,
                                            _MAX_PATH));

  std::string command_line =
      std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
      internal_flag + "\"";

  DeathTest::set_last_death_test_message("");

  CaptureStderr();
  // Flush the log buffers since the log streams are shared with the child.
  FlushInfoLog();

  // The child process will share the standard handles with the parent.
  STARTUPINFOA startup_info;
  // NOTE(review): sizeof(STARTUPINFO) is used for a STARTUPINFOA variable;
  // the A/W structs have the same size so this is harmless, but
  // sizeof(startup_info) would be clearer — confirm before changing.
  memset(&startup_info, 0, sizeof(STARTUPINFO));
  startup_info.dwFlags = STARTF_USESTDHANDLES;
  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);

  PROCESS_INFORMATION process_info;
  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
      executable_path,
      const_cast<char*>(command_line.c_str()),
      NULL,   // Returned process handle is not inheritable.
      NULL,   // Returned thread handle is not inheritable.
      TRUE,   // Child inherits all inheritable handles (for write_handle_).
      0x0,    // Default creation flags.
      NULL,   // Inherit the parent's environment.
      UnitTest::GetInstance()->original_working_dir(),
      &startup_info,
      &process_info) != FALSE);
  child_handle_.Reset(process_info.hProcess);
  ::CloseHandle(process_info.hThread);
  set_spawned(true);
  return OVERSEE_TEST;
}
# else  // We are not 
on Windows.

// ForkingDeathTest provides implementations for most of the abstract
// methods of the DeathTest interface.  Only the AssumeRole method is
// left undefined.
class ForkingDeathTest : public DeathTestImpl {
 public:
  ForkingDeathTest(const char* statement, const RE* regex);

  // All of these virtual functions are inherited from DeathTest.
  virtual int Wait();

 protected:
  void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }

 private:
  // PID of child process during death test; 0 in the child process itself
  // (-1 until a child has been spawned; see the constructor below).
  pid_t child_pid_;
};

// Constructs a ForkingDeathTest.
ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
    : DeathTestImpl(a_statement, a_regex),
      child_pid_(-1) {}

// Waits for the child in a death test to exit, returning its exit
// status, or 0 if no child process exists.  As a side effect, sets the
// outcome data member.
int ForkingDeathTest::Wait() {
  if (!spawned())
    return 0;

  // Read the outcome byte (and any error message) first; waitpid() below
  // then reaps the child and yields its raw exit status.
  ReadAndInterpretStatusByte();

  int status_value;
  GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
  set_status(status_value);
  return status_value;
}

// A concrete death test class that forks, then immediately runs the test
// in the child process.
class NoExecDeathTest : public ForkingDeathTest {
 public:
  NoExecDeathTest(const char* a_statement, const RE* a_regex) :
      ForkingDeathTest(a_statement, a_regex) { }
  virtual TestRole AssumeRole();
};

// The AssumeRole process for a fork-and-run death test.  
It implements a
// straightforward fork, with a simple pipe to transmit the status byte.
DeathTest::TestRole NoExecDeathTest::AssumeRole() {
  const size_t thread_count = GetThreadCount();
  if (thread_count != 1) {
    GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
  }

  int pipe_fd[2];
  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);

  DeathTest::set_last_death_test_message("");
  CaptureStderr();
  // When we fork the process below, the log file buffers are copied, but the
  // file descriptors are shared.  We flush all log files here so that closing
  // the file descriptors in the child process doesn't throw off the
  // synchronization between descriptors and buffers in the parent process.
  // This is as close to the fork as possible to avoid a race condition in case
  // there are multiple threads running before the death test, and another
  // thread writes to the log file.
  FlushInfoLog();

  const pid_t child_pid = fork();
  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
  set_child_pid(child_pid);
  if (child_pid == 0) {
    // Child: keep only the write end of the pipe for reporting the outcome.
    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
    set_write_fd(pipe_fd[1]);
    // Redirects all logging to stderr in the child process to prevent
    // concurrent writes to the log files.  We capture stderr in the parent
    // process and append the child process' output to a log.
    LogToStderr();
    // Event forwarding to the listeners of event listener API must be shut
    // down in death test subprocesses.
    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
    g_in_fast_death_test_child = true;
    return EXECUTE_TEST;
  } else {
    // Parent: keep only the read end and oversee the child.
    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
    set_read_fd(pipe_fd[0]);
    set_spawned(true);
    return OVERSEE_TEST;
  }
}

// A concrete death test class that forks and re-executes the main
// program from the beginning, with command-line flags set that cause
// only this specific death test to be run.
class ExecDeathTest : public ForkingDeathTest {
 public:
  ExecDeathTest(const char* a_statement, const RE* a_regex,
                const char* file, int line) :
      ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
  virtual TestRole AssumeRole();
 private:
  // Builds the argv for the re-executed child: the injectable argvs plus
  // any extra args configured via GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_.
  static ::std::vector<testing::internal::string>
  GetArgvsForDeathTestChildProcess() {
    ::std::vector<testing::internal::string> args = GetInjectableArgvs();
#  if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
    ::std::vector<testing::internal::string> extra_args =
        GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_();
    args.insert(args.end(), extra_args.begin(), extra_args.end());
#  endif  // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
    return args;
  }
  // The name of the file in which the death test is located.
  const char* const file_;
  // The line number on which the death test is located.
  const int line_;
};

// Utility class for accumulating command-line arguments.
class Arguments {
 public:
  Arguments() {
    // The vector always ends with a NULL terminator, as required by exec();
    // AddArgument/AddArguments insert new entries before the terminator.
    args_.push_back(NULL);
  }

  ~Arguments() {
    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
         ++i) {
      free(*i);
    }
  }
  void AddArgument(const char* argument) {
    
args_.insert(args_.end() - 1, posix::StrDup(argument));\n  }\n\n  template <typename Str>\n  void AddArguments(const ::std::vector<Str>& arguments) {\n    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();\n         i != arguments.end();\n         ++i) {\n      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));\n    }\n  }\n  char* const* Argv() {\n    return &args_[0];\n  }\n\n private:\n  std::vector<char*> args_;\n};\n\n// A struct that encompasses the arguments to the child process of a\n// threadsafe-style death test process.\nstruct ExecDeathTestArgs {\n  char* const* argv;  // Command-line arguments for the child's call to exec\n  int close_fd;       // File descriptor to close; the read end of a pipe\n};\n\n#  if GTEST_OS_MAC\ninline char** GetEnviron() {\n  // When Google Test is built as a framework on MacOS X, the environ variable\n  // is unavailable. Apple's documentation (man environ) recommends using\n  // _NSGetEnviron() instead.\n  return *_NSGetEnviron();\n}\n#  else\n// Some POSIX platforms expect you to declare environ. extern \"C\" makes\n// it reside in the global namespace.\nextern \"C\" char** environ;\ninline char** GetEnviron() { return environ; }\n#  endif  // GTEST_OS_MAC\n\n#  if !GTEST_OS_QNX\n// The main function for a threadsafe-style death test child process.\n// This function is called in a clone()-ed process and thus must avoid\n// any potentially unsafe operations like malloc or libc functions.\nstatic int ExecDeathTestChildMain(void* child_arg) {\n  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));\n\n  // We need to execute the test program in the same environment where\n  // it was originally invoked.  
Therefore we change to the original\n  // working directory first.\n  const char* const original_dir =\n      UnitTest::GetInstance()->original_working_dir();\n  // We can safely call chdir() as it's a direct system call.\n  if (chdir(original_dir) != 0) {\n    DeathTestAbort(std::string(\"chdir(\\\"\") + original_dir + \"\\\") failed: \" +\n                   GetLastErrnoDescription());\n    return EXIT_FAILURE;\n  }\n\n  // We can safely call execve() as it's a direct system call.  We\n  // cannot use execvp() as it's a libc function and thus potentially\n  // unsafe.  Since execve() doesn't search the PATH, the user must\n  // invoke the test program via a valid path that contains at least\n  // one path separator.\n  execve(args->argv[0], args->argv, GetEnviron());\n  DeathTestAbort(std::string(\"execve(\") + args->argv[0] + \", ...) in \" +\n                 original_dir + \" failed: \" +\n                 GetLastErrnoDescription());\n  return EXIT_FAILURE;\n}\n#  endif  // !GTEST_OS_QNX\n\n// Two utility routines that together determine the direction the stack\n// grows.\n// This could be accomplished more elegantly by a single recursive\n// function, but we want to guard against the unlikely possibility of\n// a smart compiler optimizing the recursion away.\n//\n// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining\n// StackLowerThanAddress into StackGrowsDown, which then doesn't give\n// correct answer.\nvoid StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;\nvoid StackLowerThanAddress(const void* ptr, bool* result) {\n  int dummy;\n  *result = (&dummy < ptr);\n}\n\n// Make sure AddressSanitizer does not tamper with the stack here.\nGTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\nbool StackGrowsDown() {\n  int dummy;\n  bool result;\n  StackLowerThanAddress(&dummy, &result);\n  return result;\n}\n\n// Spawns a child process with the same executable as the current process in\n// a thread-safe manner and instructs it to run the death 
test.  The\n// implementation uses fork(2) + exec.  On systems where clone(2) is\n// available, it is used instead, being slightly more thread-safe.  On QNX,\n// fork supports only single-threaded environments, so this function uses\n// spawn(2) there instead.  The function dies with an error message if\n// anything goes wrong.\nstatic pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {\n  ExecDeathTestArgs args = { argv, close_fd };\n  pid_t child_pid = -1;\n\n#  if GTEST_OS_QNX\n  // Obtains the current directory and sets it to be closed in the child\n  // process.\n  const int cwd_fd = open(\".\", O_RDONLY);\n  GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));\n  // We need to execute the test program in the same environment where\n  // it was originally invoked.  Therefore we change to the original\n  // working directory first.\n  const char* const original_dir =\n      UnitTest::GetInstance()->original_working_dir();\n  // We can safely call chdir() as it's a direct system call.\n  if (chdir(original_dir) != 0) {\n    DeathTestAbort(std::string(\"chdir(\\\"\") + original_dir + \"\\\") failed: \" +\n                   GetLastErrnoDescription());\n    return EXIT_FAILURE;\n  }\n\n  int fd_flags;\n  // Set close_fd to be closed after spawn.\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,\n                                        fd_flags | FD_CLOEXEC));\n  struct inheritance inherit = {0};\n  // spawn is a system call.\n  child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron());\n  // Restores the current working directory.\n  GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));\n\n#  else   // GTEST_OS_QNX\n#   if GTEST_OS_LINUX\n  // When a SIGPROF signal is received while fork() or clone() are executing,\n  // the process may hang. 
To avoid this, we ignore SIGPROF here and re-enable\n  // it after the call to fork()/clone() is complete.\n  struct sigaction saved_sigprof_action;\n  struct sigaction ignore_sigprof_action;\n  memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));\n  sigemptyset(&ignore_sigprof_action.sa_mask);\n  ignore_sigprof_action.sa_handler = SIG_IGN;\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(\n      SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));\n#   endif  // GTEST_OS_LINUX\n\n#   if GTEST_HAS_CLONE\n  const bool use_fork = GTEST_FLAG(death_test_use_fork);\n\n  if (!use_fork) {\n    static const bool stack_grows_down = StackGrowsDown();\n    const size_t stack_size = getpagesize();\n    // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.\n    void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,\n                             MAP_ANON | MAP_PRIVATE, -1, 0);\n    GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);\n\n    // Maximum stack alignment in bytes:  For a downward-growing stack, this\n    // amount is subtracted from size of the stack space to get an address\n    // that is within the stack space and is aligned on all systems we care\n    // about.  As far as I know there is no ABI with stack alignment greater\n    // than 64.  We assume stack and stack_size already have alignment of\n    // kMaxStackAlignment.\n    const size_t kMaxStackAlignment = 64;\n    void* const stack_top =\n        static_cast<char*>(stack) +\n            (stack_grows_down ? 
stack_size - kMaxStackAlignment : 0);\n    GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&\n        reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);\n\n    child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);\n\n    GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);\n  }\n#   else\n  const bool use_fork = true;\n#   endif  // GTEST_HAS_CLONE\n\n  if (use_fork && (child_pid = fork()) == 0) {\n      ExecDeathTestChildMain(&args);\n      _exit(0);\n  }\n#  endif  // GTEST_OS_QNX\n#  if GTEST_OS_LINUX\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(\n      sigaction(SIGPROF, &saved_sigprof_action, NULL));\n#  endif  // GTEST_OS_LINUX\n\n  GTEST_DEATH_TEST_CHECK_(child_pid != -1);\n  return child_pid;\n}\n\n// The AssumeRole process for a fork-and-exec death test.  It re-executes the\n// main program from the beginning, setting the --gtest_filter\n// and --gtest_internal_run_death_test flags to cause only the current\n// death test to be re-run.\nDeathTest::TestRole ExecDeathTest::AssumeRole() {\n  const UnitTestImpl* const impl = GetUnitTestImpl();\n  const InternalRunDeathTestFlag* const flag =\n      impl->internal_run_death_test_flag();\n  const TestInfo* const info = impl->current_test_info();\n  const int death_test_index = info->result()->death_test_count();\n\n  if (flag != NULL) {\n    set_write_fd(flag->write_fd());\n    return EXECUTE_TEST;\n  }\n\n  int pipe_fd[2];\n  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);\n  // Clear the close-on-exec flag on the write end of the pipe, lest\n  // it be closed when the child process does an exec:\n  GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);\n\n  const std::string filter_flag =\n      std::string(\"--\") + GTEST_FLAG_PREFIX_ + kFilterFlag + \"=\"\n      + info->test_case_name() + \".\" + info->name();\n  const std::string internal_flag =\n      std::string(\"--\") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + \"=\"\n      + file_ + \"|\" + 
StreamableToString(line_) + \"|\"\n      + StreamableToString(death_test_index) + \"|\"\n      + StreamableToString(pipe_fd[1]);\n  Arguments args;\n  args.AddArguments(GetArgvsForDeathTestChildProcess());\n  args.AddArgument(filter_flag.c_str());\n  args.AddArgument(internal_flag.c_str());\n\n  DeathTest::set_last_death_test_message(\"\");\n\n  CaptureStderr();\n  // See the comment in NoExecDeathTest::AssumeRole for why the next line\n  // is necessary.\n  FlushInfoLog();\n\n  const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);\n  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));\n  set_child_pid(child_pid);\n  set_read_fd(pipe_fd[0]);\n  set_spawned(true);\n  return OVERSEE_TEST;\n}\n\n# endif  // !GTEST_OS_WINDOWS\n\n// Creates a concrete DeathTest-derived class that depends on the\n// --gtest_death_test_style flag, and sets the pointer pointed to\n// by the \"test\" argument to its address.  If the test should be\n// skipped, sets that pointer to NULL.  Returns true, unless the\n// flag is set to an invalid value.\nbool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,\n                                     const char* file, int line,\n                                     DeathTest** test) {\n  UnitTestImpl* const impl = GetUnitTestImpl();\n  const InternalRunDeathTestFlag* const flag =\n      impl->internal_run_death_test_flag();\n  const int death_test_index = impl->current_test_info()\n      ->increment_death_test_count();\n\n  if (flag != NULL) {\n    if (death_test_index > flag->index()) {\n      DeathTest::set_last_death_test_message(\n          \"Death test count (\" + StreamableToString(death_test_index)\n          + \") somehow exceeded expected maximum (\"\n          + StreamableToString(flag->index()) + \")\");\n      return false;\n    }\n\n    if (!(flag->file() == file && flag->line() == line &&\n          flag->index() == death_test_index)) {\n      *test = NULL;\n      return true;\n    }\n  }\n\n# 
if GTEST_OS_WINDOWS\n\n  if (GTEST_FLAG(death_test_style) == \"threadsafe\" ||\n      GTEST_FLAG(death_test_style) == \"fast\") {\n    *test = new WindowsDeathTest(statement, regex, file, line);\n  }\n\n# else\n\n  if (GTEST_FLAG(death_test_style) == \"threadsafe\") {\n    *test = new ExecDeathTest(statement, regex, file, line);\n  } else if (GTEST_FLAG(death_test_style) == \"fast\") {\n    *test = new NoExecDeathTest(statement, regex);\n  }\n\n# endif  // GTEST_OS_WINDOWS\n\n  else {  // NOLINT - this is more readable than unbalanced brackets inside #if.\n    DeathTest::set_last_death_test_message(\n        \"Unknown death test style \\\"\" + GTEST_FLAG(death_test_style)\n        + \"\\\" encountered\");\n    return false;\n  }\n\n  return true;\n}\n\n# if GTEST_OS_WINDOWS\n// Recreates the pipe and event handles from the provided parameters,\n// signals the event, and returns a file descriptor wrapped around the pipe\n// handle. This function is called in the child process only.\nint GetStatusFileDescriptor(unsigned int parent_process_id,\n                            size_t write_handle_as_size_t,\n                            size_t event_handle_as_size_t) {\n  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,\n                                                   FALSE,  // Non-inheritable.\n                                                   parent_process_id));\n  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {\n    DeathTestAbort(\"Unable to open parent process \" +\n                   StreamableToString(parent_process_id));\n  }\n\n  // TODO(vladl@google.com): Replace the following check with a\n  // compile-time assertion when available.\n  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));\n\n  const HANDLE write_handle =\n      reinterpret_cast<HANDLE>(write_handle_as_size_t);\n  HANDLE dup_write_handle;\n\n  // The newly initialized handle is accessible only in in the parent\n  // process. 
To obtain one accessible within the child, we need to use\n  // DuplicateHandle.\n  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,\n                         ::GetCurrentProcess(), &dup_write_handle,\n                         0x0,    // Requested privileges ignored since\n                                 // DUPLICATE_SAME_ACCESS is used.\n                         FALSE,  // Request non-inheritable handler.\n                         DUPLICATE_SAME_ACCESS)) {\n    DeathTestAbort(\"Unable to duplicate the pipe handle \" +\n                   StreamableToString(write_handle_as_size_t) +\n                   \" from the parent process \" +\n                   StreamableToString(parent_process_id));\n  }\n\n  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);\n  HANDLE dup_event_handle;\n\n  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,\n                         ::GetCurrentProcess(), &dup_event_handle,\n                         0x0,\n                         FALSE,\n                         DUPLICATE_SAME_ACCESS)) {\n    DeathTestAbort(\"Unable to duplicate the event handle \" +\n                   StreamableToString(event_handle_as_size_t) +\n                   \" from the parent process \" +\n                   StreamableToString(parent_process_id));\n  }\n\n  const int write_fd =\n      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);\n  if (write_fd == -1) {\n    DeathTestAbort(\"Unable to convert pipe handle \" +\n                   StreamableToString(write_handle_as_size_t) +\n                   \" to a file descriptor\");\n  }\n\n  // Signals the parent that the write end of the pipe has been acquired\n  // so the parent can release its own write end.\n  ::SetEvent(dup_event_handle);\n\n  return write_fd;\n}\n# endif  // GTEST_OS_WINDOWS\n\n// Returns a newly created InternalRunDeathTestFlag object with fields\n// initialized from the GTEST_FLAG(internal_run_death_test) flag 
if\n// the flag is specified; otherwise returns NULL.\nInternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {\n  if (GTEST_FLAG(internal_run_death_test) == \"\") return NULL;\n\n  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we\n  // can use it here.\n  int line = -1;\n  int index = -1;\n  ::std::vector< ::std::string> fields;\n  SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);\n  int write_fd = -1;\n\n# if GTEST_OS_WINDOWS\n\n  unsigned int parent_process_id = 0;\n  size_t write_handle_as_size_t = 0;\n  size_t event_handle_as_size_t = 0;\n\n  if (fields.size() != 6\n      || !ParseNaturalNumber(fields[1], &line)\n      || !ParseNaturalNumber(fields[2], &index)\n      || !ParseNaturalNumber(fields[3], &parent_process_id)\n      || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)\n      || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {\n    DeathTestAbort(\"Bad --gtest_internal_run_death_test flag: \" +\n                   GTEST_FLAG(internal_run_death_test));\n  }\n  write_fd = GetStatusFileDescriptor(parent_process_id,\n                                     write_handle_as_size_t,\n                                     event_handle_as_size_t);\n# else\n\n  if (fields.size() != 4\n      || !ParseNaturalNumber(fields[1], &line)\n      || !ParseNaturalNumber(fields[2], &index)\n      || !ParseNaturalNumber(fields[3], &write_fd)) {\n    DeathTestAbort(\"Bad --gtest_internal_run_death_test flag: \"\n        + GTEST_FLAG(internal_run_death_test));\n  }\n\n# endif  // GTEST_OS_WINDOWS\n\n  return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);\n}\n\n}  // namespace internal\n\n#endif  // GTEST_HAS_DEATH_TEST\n\n}  // namespace testing\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code 
must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Authors: keith.ray@gmail.com (Keith Ray)\n\n\n#include <stdlib.h>\n\n#if GTEST_OS_WINDOWS_MOBILE\n# include <windows.h>\n#elif GTEST_OS_WINDOWS\n# include <direct.h>\n# include <io.h>\n#elif GTEST_OS_SYMBIAN\n// Symbian OpenC has PATH_MAX in sys/syslimits.h\n# include <sys/syslimits.h>\n#else\n# include <limits.h>\n# include <climits>  // Some Linux distributions define PATH_MAX here.\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n#if GTEST_OS_WINDOWS\n# define GTEST_PATH_MAX_ _MAX_PATH\n#elif defined(PATH_MAX)\n# define GTEST_PATH_MAX_ PATH_MAX\n#elif defined(_XOPEN_PATH_MAX)\n# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX\n#else\n# define GTEST_PATH_MAX_ 
_POSIX_PATH_MAX\n#endif  // GTEST_OS_WINDOWS\n\n\nnamespace testing {\nnamespace internal {\n\n#if GTEST_OS_WINDOWS\n// On Windows, '\\\\' is the standard path separator, but many tools and the\n// Windows API also accept '/' as an alternate path separator. Unless otherwise\n// noted, a file path can contain either kind of path separators, or a mixture\n// of them.\nconst char kPathSeparator = '\\\\';\nconst char kAlternatePathSeparator = '/';\nconst char kAlternatePathSeparatorString[] = \"/\";\n# if GTEST_OS_WINDOWS_MOBILE\n// Windows CE doesn't have a current directory. You should not use\n// the current directory in tests on Windows CE, but this at least\n// provides a reasonable fallback.\nconst char kCurrentDirectoryString[] = \"\\\\\";\n// Windows CE doesn't define INVALID_FILE_ATTRIBUTES\nconst DWORD kInvalidFileAttributes = 0xffffffff;\n# else\nconst char kCurrentDirectoryString[] = \".\\\\\";\n# endif  // GTEST_OS_WINDOWS_MOBILE\n#else\nconst char kPathSeparator = '/';\nconst char kCurrentDirectoryString[] = \"./\";\n#endif  // GTEST_OS_WINDOWS\n\n// Returns whether the given character is a valid path separator.\nstatic bool IsPathSeparator(char c) {\n#if GTEST_HAS_ALT_PATH_SEP_\n  return (c == kPathSeparator) || (c == kAlternatePathSeparator);\n#else\n  return c == kPathSeparator;\n#endif\n}\n\n// Returns the current working directory, or \"\" if unsuccessful.\nFilePath FilePath::GetCurrentDir() {\n#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT\n  // Windows CE doesn't have a current directory, so we just return\n  // something reasonable.\n  return FilePath(kCurrentDirectoryString);\n#elif GTEST_OS_WINDOWS\n  char cwd[GTEST_PATH_MAX_ + 1] = { '\\0' };\n  return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? 
"" : cwd);
#else
  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
  char* result = getcwd(cwd, sizeof(cwd));
# if GTEST_OS_NACL
  // getcwd will likely fail in NaCl due to the sandbox, so return something
  // reasonable. The user may have provided a shim implementation for getcwd,
  // however, so fall back only when failure is detected.
  return FilePath(result == NULL ? kCurrentDirectoryString : cwd);
# endif  // GTEST_OS_NACL
  return FilePath(result == NULL ? "" : cwd);
#endif  // GTEST_OS_WINDOWS_MOBILE
}

// Returns a copy of the FilePath with the case-insensitive extension removed.
// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
// FilePath("dir/file"). If a case-insensitive extension is not
// found, returns a copy of the original FilePath.
FilePath FilePath::RemoveExtension(const char* extension) const {
  const std::string dot_extension = std::string(".") + extension;
  if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
    return FilePath(pathname_.substr(
        0, pathname_.length() - dot_extension.length()));
  }
  return *this;
}

// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FilePath::FindLastPathSeparator() const {
  const char* const last_sep = strrchr(c_str(), kPathSeparator);
#if GTEST_HAS_ALT_PATH_SEP_
  const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
  // Comparing two pointers of which only one is NULL is undefined.
  if (last_alt_sep != NULL &&
      (last_sep == NULL || last_alt_sep > last_sep)) {
    return last_alt_sep;
  }
#endif
  return last_sep;
}

// Returns a copy of the FilePath with the directory part removed.
// Example: FilePath("path/to/file").RemoveDirectoryName() returns
// FilePath("file"). 
If there is no directory part (\"just_a_file\"), it returns\n// the FilePath unmodified. If there is no file part (\"just_a_dir/\") it\n// returns an empty FilePath (\"\").\n// On Windows platform, '\\' is the path separator, otherwise it is '/'.\nFilePath FilePath::RemoveDirectoryName() const {\n  const char* const last_sep = FindLastPathSeparator();\n  return last_sep ? FilePath(last_sep + 1) : *this;\n}\n\n// RemoveFileName returns the directory path with the filename removed.\n// Example: FilePath(\"path/to/file\").RemoveFileName() returns \"path/to/\".\n// If the FilePath is \"a_file\" or \"/a_file\", RemoveFileName returns\n// FilePath(\"./\") or, on Windows, FilePath(\".\\\\\"). If the filepath does\n// not have a file, like \"just/a/dir/\", it returns the FilePath unmodified.\n// On Windows platform, '\\' is the path separator, otherwise it is '/'.\nFilePath FilePath::RemoveFileName() const {\n  const char* const last_sep = FindLastPathSeparator();\n  std::string dir;\n  if (last_sep) {\n    dir = std::string(c_str(), last_sep + 1 - c_str());\n  } else {\n    dir = kCurrentDirectoryString;\n  }\n  return FilePath(dir);\n}\n\n// Helper functions for naming files in a directory for xml output.\n\n// Given directory = \"dir\", base_name = \"test\", number = 0,\n// extension = \"xml\", returns \"dir/test.xml\". 
If number is greater\n// than zero (e.g., 12), returns \"dir/test_12.xml\".\n// On Windows platform, uses \\ as the separator rather than /.\nFilePath FilePath::MakeFileName(const FilePath& directory,\n                                const FilePath& base_name,\n                                int number,\n                                const char* extension) {\n  std::string file;\n  if (number == 0) {\n    file = base_name.string() + \".\" + extension;\n  } else {\n    file = base_name.string() + \"_\" + StreamableToString(number)\n        + \".\" + extension;\n  }\n  return ConcatPaths(directory, FilePath(file));\n}\n\n// Given directory = \"dir\", relative_path = \"test.xml\", returns \"dir/test.xml\".\n// On Windows, uses \\ as the separator rather than /.\nFilePath FilePath::ConcatPaths(const FilePath& directory,\n                               const FilePath& relative_path) {\n  if (directory.IsEmpty())\n    return relative_path;\n  const FilePath dir(directory.RemoveTrailingPathSeparator());\n  return FilePath(dir.string() + kPathSeparator + relative_path.string());\n}\n\n// Returns true if pathname describes something findable in the file-system,\n// either a file, directory, or whatever.\nbool FilePath::FileOrDirectoryExists() const {\n#if GTEST_OS_WINDOWS_MOBILE\n  LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());\n  const DWORD attributes = GetFileAttributes(unicode);\n  delete [] unicode;\n  return attributes != kInvalidFileAttributes;\n#else\n  posix::StatStruct file_stat;\n  return posix::Stat(pathname_.c_str(), &file_stat) == 0;\n#endif  // GTEST_OS_WINDOWS_MOBILE\n}\n\n// Returns true if pathname describes a directory in the file-system\n// that exists.\nbool FilePath::DirectoryExists() const {\n  bool result = false;\n#if GTEST_OS_WINDOWS\n  // Don't strip off trailing separator if path is a root directory on\n  // Windows (like \"C:\\\\\").\n  const FilePath& path(IsRootDirectory() ? 
*this :\n                                           RemoveTrailingPathSeparator());\n#else\n  const FilePath& path(*this);\n#endif\n\n#if GTEST_OS_WINDOWS_MOBILE\n  LPCWSTR unicode = String::AnsiToUtf16(path.c_str());\n  const DWORD attributes = GetFileAttributes(unicode);\n  delete [] unicode;\n  if ((attributes != kInvalidFileAttributes) &&\n      (attributes & FILE_ATTRIBUTE_DIRECTORY)) {\n    result = true;\n  }\n#else\n  posix::StatStruct file_stat;\n  result = posix::Stat(path.c_str(), &file_stat) == 0 &&\n      posix::IsDir(file_stat);\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n  return result;\n}\n\n// Returns true if pathname describes a root directory. (Windows has one\n// root directory per disk drive.)\nbool FilePath::IsRootDirectory() const {\n#if GTEST_OS_WINDOWS\n  // TODO(wan@google.com): on Windows a network share like\n  // \\\\server\\share can be a root directory, although it cannot be the\n  // current directory.  Handle this properly.\n  return pathname_.length() == 3 && IsAbsolutePath();\n#else\n  return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);\n#endif\n}\n\n// Returns true if pathname describes an absolute path.\nbool FilePath::IsAbsolutePath() const {\n  const char* const name = pathname_.c_str();\n#if GTEST_OS_WINDOWS\n  return pathname_.length() >= 3 &&\n     ((name[0] >= 'a' && name[0] <= 'z') ||\n      (name[0] >= 'A' && name[0] <= 'Z')) &&\n     name[1] == ':' &&\n     IsPathSeparator(name[2]);\n#else\n  return IsPathSeparator(name[0]);\n#endif\n}\n\n// Returns a pathname for a file that does not currently exist. The pathname\n// will be directory/base_name.extension or\n// directory/base_name_<number>.extension if directory/base_name.extension\n// already exists. 
The number will be incremented until a pathname is found\n// that does not already exist.\n// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.\n// There could be a race condition if two or more processes are calling this\n// function at the same time -- they could both pick the same filename.\nFilePath FilePath::GenerateUniqueFileName(const FilePath& directory,\n                                          const FilePath& base_name,\n                                          const char* extension) {\n  FilePath full_pathname;\n  int number = 0;\n  do {\n    full_pathname.Set(MakeFileName(directory, base_name, number++, extension));\n  } while (full_pathname.FileOrDirectoryExists());\n  return full_pathname;\n}\n\n// Returns true if FilePath ends with a path separator, which indicates that\n// it is intended to represent a directory. Returns false otherwise.\n// This does NOT check that a directory (or file) actually exists.\nbool FilePath::IsDirectory() const {\n  return !pathname_.empty() &&\n         IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);\n}\n\n// Create directories so that path exists. Returns true if successful or if\n// the directories already exist; returns false if unable to create directories\n// for any reason.\nbool FilePath::CreateDirectoriesRecursively() const {\n  if (!this->IsDirectory()) {\n    return false;\n  }\n\n  if (pathname_.length() == 0 || this->DirectoryExists()) {\n    return true;\n  }\n\n  const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());\n  return parent.CreateDirectoriesRecursively() && this->CreateFolder();\n}\n\n// Create the directory so that path exists. Returns true if successful or\n// if the directory already exists; returns false if unable to create the\n// directory for any reason, including if the parent directory does not\n// exist. 
Not named \"CreateDirectory\" because that's a macro on Windows.\nbool FilePath::CreateFolder() const {\n#if GTEST_OS_WINDOWS_MOBILE\n  FilePath removed_sep(this->RemoveTrailingPathSeparator());\n  LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());\n  int result = CreateDirectory(unicode, NULL) ? 0 : -1;\n  delete [] unicode;\n#elif GTEST_OS_WINDOWS\n  int result = _mkdir(pathname_.c_str());\n#else\n  int result = mkdir(pathname_.c_str(), 0777);\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n  if (result == -1) {\n    return this->DirectoryExists();  // An error is OK if the directory exists.\n  }\n  return true;  // No error.\n}\n\n// If input name has a trailing separator character, remove it and return the\n// name, otherwise return the name string unmodified.\n// On Windows platform, uses \\ as the separator, other platforms use /.\nFilePath FilePath::RemoveTrailingPathSeparator() const {\n  return IsDirectory()\n      ? FilePath(pathname_.substr(0, pathname_.length() - 1))\n      : *this;\n}\n\n// Removes any redundant separators that might be in the pathname.\n// For example, \"bar///foo\" becomes \"bar/foo\". Does not eliminate other\n// redundancies that might be in a pathname involving \".\" or \"..\".\n// TODO(wan@google.com): handle Windows network shares (e.g. 
\\\\server\\share).\nvoid FilePath::Normalize() {\n  if (pathname_.c_str() == NULL) {\n    pathname_ = \"\";\n    return;\n  }\n  const char* src = pathname_.c_str();\n  char* const dest = new char[pathname_.length() + 1];\n  char* dest_ptr = dest;\n  memset(dest_ptr, 0, pathname_.length() + 1);\n\n  while (*src != '\\0') {\n    *dest_ptr = *src;\n    if (!IsPathSeparator(*src)) {\n      src++;\n    } else {\n#if GTEST_HAS_ALT_PATH_SEP_\n      if (*dest_ptr == kAlternatePathSeparator) {\n        *dest_ptr = kPathSeparator;\n      }\n#endif\n      while (IsPathSeparator(*src))\n        src++;\n    }\n    dest_ptr++;\n  }\n  *dest_ptr = '\\0';\n  pathname_ = dest;\n  delete[] dest;\n}\n\n}  // namespace internal\n}  // namespace testing\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n\n#include <limits.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <fstream>\n\n#if GTEST_OS_WINDOWS\n# include <windows.h>\n# include <io.h>\n# include <sys/stat.h>\n# include <map>  // Used in ThreadLocal.\n#else\n# include <unistd.h>\n#endif  // GTEST_OS_WINDOWS\n\n#if GTEST_OS_MAC\n# include <mach/mach_init.h>\n# include <mach/task.h>\n# include <mach/vm_map.h>\n#endif  // GTEST_OS_MAC\n\n#if GTEST_OS_QNX\n# include <devctl.h>\n# include <fcntl.h>\n# include <sys/procfs.h>\n#endif  // GTEST_OS_QNX\n\n#if GTEST_OS_AIX\n# include <procinfo.h>\n# include <sys/types.h>\n#endif  // GTEST_OS_AIX\n\n\n// Indicates that this translation unit is part of Google Test's\n// implementation.  It must come before gtest-internal-inl.h is\n// included, or there will be a compiler error.  
This trick exists to\n// prevent the accidental inclusion of gtest-internal-inl.h in the\n// user's code.\n#define GTEST_IMPLEMENTATION_ 1\n#undef GTEST_IMPLEMENTATION_\n\nnamespace testing {\nnamespace internal {\n\n#if defined(_MSC_VER) || defined(__BORLANDC__)\n// MSVC and C++Builder do not provide a definition of STDERR_FILENO.\nconst int kStdOutFileno = 1;\nconst int kStdErrFileno = 2;\n#else\nconst int kStdOutFileno = STDOUT_FILENO;\nconst int kStdErrFileno = STDERR_FILENO;\n#endif  // _MSC_VER\n\n#if GTEST_OS_LINUX\n\nnamespace {\ntemplate <typename T>\nT ReadProcFileField(const string& filename, int field) {\n  std::string dummy;\n  std::ifstream file(filename.c_str());\n  while (field-- > 0) {\n    file >> dummy;\n  }\n  T output = 0;\n  file >> output;\n  return output;\n}\n}  // namespace\n\n// Returns the number of active threads, or 0 when there is an error.\nsize_t GetThreadCount() {\n  const string filename =\n      (Message() << \"/proc/\" << getpid() << \"/stat\").GetString();\n  return ReadProcFileField<int>(filename, 19);\n}\n\n#elif GTEST_OS_MAC\n\nsize_t GetThreadCount() {\n  const task_t task = mach_task_self();\n  mach_msg_type_number_t thread_count;\n  thread_act_array_t thread_list;\n  const kern_return_t status = task_threads(task, &thread_list, &thread_count);\n  if (status == KERN_SUCCESS) {\n    // task_threads allocates resources in thread_list and we need to free them\n    // to avoid leaks.\n    vm_deallocate(task,\n                  reinterpret_cast<vm_address_t>(thread_list),\n                  sizeof(thread_t) * thread_count);\n    return static_cast<size_t>(thread_count);\n  } else {\n    return 0;\n  }\n}\n\n#elif GTEST_OS_QNX\n\n// Returns the number of threads running in the process, or 0 to indicate that\n// we cannot detect it.\nsize_t GetThreadCount() {\n  const int fd = open(\"/proc/self/as\", O_RDONLY);\n  if (fd < 0) {\n    return 0;\n  }\n  procfs_info process_info;\n  const int status =\n      devctl(fd, 
DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);\n  close(fd);\n  if (status == EOK) {\n    return static_cast<size_t>(process_info.num_threads);\n  } else {\n    return 0;\n  }\n}\n\n#elif GTEST_OS_AIX\n\nsize_t GetThreadCount() {\n  struct procentry64 entry;\n  pid_t pid = getpid();\n  int status = getprocs64(&entry, sizeof(entry), NULL, 0, &pid, 1);\n  if (status == 1) {\n    return entry.pi_thcount;\n  } else {\n    return 0;\n  }\n}\n\n#else\n\nsize_t GetThreadCount() {\n  // There's no portable way to detect the number of threads, so we just\n  // return 0 to indicate that we cannot detect it.\n  return 0;\n}\n\n#endif  // GTEST_OS_LINUX\n\n#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS\n\nvoid SleepMilliseconds(int n) {\n  ::Sleep(n);\n}\n\nAutoHandle::AutoHandle()\n    : handle_(INVALID_HANDLE_VALUE) {}\n\nAutoHandle::AutoHandle(Handle handle)\n    : handle_(handle) {}\n\nAutoHandle::~AutoHandle() {\n  Reset();\n}\n\nAutoHandle::Handle AutoHandle::Get() const {\n  return handle_;\n}\n\nvoid AutoHandle::Reset() {\n  Reset(INVALID_HANDLE_VALUE);\n}\n\nvoid AutoHandle::Reset(HANDLE handle) {\n  // Resetting with the same handle we already own is invalid.\n  if (handle_ != handle) {\n    if (IsCloseable()) {\n      ::CloseHandle(handle_);\n    }\n    handle_ = handle;\n  } else {\n    GTEST_CHECK_(!IsCloseable())\n        << \"Resetting a valid handle to itself is likely a programmer error \"\n            \"and thus not allowed.\";\n  }\n}\n\nbool AutoHandle::IsCloseable() const {\n  // Different Windows APIs may use either of these values to represent an\n  // invalid handle.\n  return handle_ != NULL && handle_ != INVALID_HANDLE_VALUE;\n}\n\nNotification::Notification()\n    : event_(::CreateEvent(NULL,   // Default security attributes.\n                           TRUE,   // Do not reset automatically.\n                           FALSE,  // Initially unset.\n                           NULL)) {  // Anonymous event.\n  GTEST_CHECK_(event_.Get() != 
NULL);\n}\n\nvoid Notification::Notify() {\n  GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);\n}\n\nvoid Notification::WaitForNotification() {\n  GTEST_CHECK_(\n      ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);\n}\n\nMutex::Mutex()\n    : owner_thread_id_(0),\n      type_(kDynamic),\n      critical_section_init_phase_(0),\n      critical_section_(new CRITICAL_SECTION) {\n  ::InitializeCriticalSection(critical_section_);\n}\n\nMutex::~Mutex() {\n  // Static mutexes are leaked intentionally. It is not thread-safe to try\n  // to clean them up.\n  // TODO(yukawa): Switch to Slim Reader/Writer (SRW) Locks, which requires\n  // nothing to clean it up but is available only on Vista and later.\n  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa904937.aspx\n  if (type_ == kDynamic) {\n    ::DeleteCriticalSection(critical_section_);\n    delete critical_section_;\n    critical_section_ = NULL;\n  }\n}\n\nvoid Mutex::Lock() {\n  ThreadSafeLazyInit();\n  ::EnterCriticalSection(critical_section_);\n  owner_thread_id_ = ::GetCurrentThreadId();\n}\n\nvoid Mutex::Unlock() {\n  ThreadSafeLazyInit();\n  // We don't protect writing to owner_thread_id_ here, as it's the\n  // caller's responsibility to ensure that the current thread holds the\n  // mutex when this is called.\n  owner_thread_id_ = 0;\n  ::LeaveCriticalSection(critical_section_);\n}\n\n// Does nothing if the current thread holds the mutex. 
Otherwise, crashes\n// with high probability.\nvoid Mutex::AssertHeld() {\n  ThreadSafeLazyInit();\n  GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())\n      << \"The current thread is not holding the mutex @\" << this;\n}\n\n// Initializes owner_thread_id_ and critical_section_ in static mutexes.\nvoid Mutex::ThreadSafeLazyInit() {\n  // Dynamic mutexes are initialized in the constructor.\n  if (type_ == kStatic) {\n    switch (\n        ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {\n      case 0:\n        // If critical_section_init_phase_ was 0 before the exchange, we\n        // are the first to test it and need to perform the initialization.\n        owner_thread_id_ = 0;\n        critical_section_ = new CRITICAL_SECTION;\n        ::InitializeCriticalSection(critical_section_);\n        // Updates the critical_section_init_phase_ to 2 to signal\n        // initialization complete.\n        GTEST_CHECK_(::InterlockedCompareExchange(\n                          &critical_section_init_phase_, 2L, 1L) ==\n                      1L);\n        break;\n      case 1:\n        // Somebody else is already initializing the mutex; spin until they\n        // are done.\n        while (::InterlockedCompareExchange(&critical_section_init_phase_,\n                                            2L,\n                                            2L) != 2L) {\n          // Possibly yields the rest of the thread's time slice to other\n          // threads.\n          ::Sleep(0);\n        }\n        break;\n\n      case 2:\n        break;  // The mutex is already initialized and ready for use.\n\n      default:\n        GTEST_CHECK_(false)\n            << \"Unexpected value of critical_section_init_phase_ \"\n            << \"while initializing a static mutex.\";\n    }\n  }\n}\n\nnamespace {\n\nclass ThreadWithParamSupport : public ThreadWithParamBase {\n public:\n  static HANDLE CreateThread(Runnable* runnable,\n                             
Notification* thread_can_start) {\n    ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);\n    DWORD thread_id;\n    // TODO(yukawa): Consider to use _beginthreadex instead.\n    HANDLE thread_handle = ::CreateThread(\n        NULL,    // Default security.\n        0,       // Default stack size.\n        &ThreadWithParamSupport::ThreadMain,\n        param,   // Parameter to ThreadMainStatic\n        0x0,     // Default creation flags.\n        &thread_id);  // Need a valid pointer for the call to work under Win98.\n    GTEST_CHECK_(thread_handle != NULL) << \"CreateThread failed with error \"\n                                        << ::GetLastError() << \".\";\n    if (thread_handle == NULL) {\n      delete param;\n    }\n    return thread_handle;\n  }\n\n private:\n  struct ThreadMainParam {\n    ThreadMainParam(Runnable* runnable, Notification* thread_can_start)\n        : runnable_(runnable),\n          thread_can_start_(thread_can_start) {\n    }\n    scoped_ptr<Runnable> runnable_;\n    // Does not own.\n    Notification* thread_can_start_;\n  };\n\n  static DWORD WINAPI ThreadMain(void* ptr) {\n    // Transfers ownership.\n    scoped_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));\n    if (param->thread_can_start_ != NULL)\n      param->thread_can_start_->WaitForNotification();\n    param->runnable_->Run();\n    return 0;\n  }\n\n  // Prohibit instantiation.\n  ThreadWithParamSupport();\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);\n};\n\n}  // namespace\n\nThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,\n                                         Notification* thread_can_start)\n      : thread_(ThreadWithParamSupport::CreateThread(runnable,\n                                                     thread_can_start)) {\n}\n\nThreadWithParamBase::~ThreadWithParamBase() {\n  Join();\n}\n\nvoid ThreadWithParamBase::Join() {\n  GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == 
WAIT_OBJECT_0)\n      << \"Failed to join the thread with error \" << ::GetLastError() << \".\";\n}\n\n// Maps a thread to a set of ThreadIdToThreadLocals that have values\n// instantiated on that thread and notifies them when the thread exits.  A\n// ThreadLocal instance is expected to persist until all threads it has\n// values on have terminated.\nclass ThreadLocalRegistryImpl {\n public:\n  // Registers thread_local_instance as having value on the current thread.\n  // Returns a value that can be used to identify the thread from other threads.\n  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(\n      const ThreadLocalBase* thread_local_instance) {\n    DWORD current_thread = ::GetCurrentThreadId();\n    MutexLock lock(&mutex_);\n    ThreadIdToThreadLocals* const thread_to_thread_locals =\n        GetThreadLocalsMapLocked();\n    ThreadIdToThreadLocals::iterator thread_local_pos =\n        thread_to_thread_locals->find(current_thread);\n    if (thread_local_pos == thread_to_thread_locals->end()) {\n      thread_local_pos = thread_to_thread_locals->insert(\n          std::make_pair(current_thread, ThreadLocalValues())).first;\n      StartWatcherThreadFor(current_thread);\n    }\n    ThreadLocalValues& thread_local_values = thread_local_pos->second;\n    ThreadLocalValues::iterator value_pos =\n        thread_local_values.find(thread_local_instance);\n    if (value_pos == thread_local_values.end()) {\n      value_pos =\n          thread_local_values\n              .insert(std::make_pair(\n                  thread_local_instance,\n                  linked_ptr<ThreadLocalValueHolderBase>(\n                      thread_local_instance->NewValueForCurrentThread())))\n              .first;\n    }\n    return value_pos->second.get();\n  }\n\n  static void OnThreadLocalDestroyed(\n      const ThreadLocalBase* thread_local_instance) {\n    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;\n    // Clean up the ThreadLocalValues data structure 
while holding the lock, but\n    // defer the destruction of the ThreadLocalValueHolderBases.\n    {\n      MutexLock lock(&mutex_);\n      ThreadIdToThreadLocals* const thread_to_thread_locals =\n          GetThreadLocalsMapLocked();\n      for (ThreadIdToThreadLocals::iterator it =\n          thread_to_thread_locals->begin();\n          it != thread_to_thread_locals->end();\n          ++it) {\n        ThreadLocalValues& thread_local_values = it->second;\n        ThreadLocalValues::iterator value_pos =\n            thread_local_values.find(thread_local_instance);\n        if (value_pos != thread_local_values.end()) {\n          value_holders.push_back(value_pos->second);\n          thread_local_values.erase(value_pos);\n          // This 'if' can only be successful at most once, so theoretically we\n          // could break out of the loop here, but we don't bother doing so.\n        }\n      }\n    }\n    // Outside the lock, let the destructor for 'value_holders' deallocate the\n    // ThreadLocalValueHolderBases.\n  }\n\n  static void OnThreadExit(DWORD thread_id) {\n    GTEST_CHECK_(thread_id != 0) << ::GetLastError();\n    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;\n    // Clean up the ThreadIdToThreadLocals data structure while holding the\n    // lock, but defer the destruction of the ThreadLocalValueHolderBases.\n    {\n      MutexLock lock(&mutex_);\n      ThreadIdToThreadLocals* const thread_to_thread_locals =\n          GetThreadLocalsMapLocked();\n      ThreadIdToThreadLocals::iterator thread_local_pos =\n          thread_to_thread_locals->find(thread_id);\n      if (thread_local_pos != thread_to_thread_locals->end()) {\n        ThreadLocalValues& thread_local_values = thread_local_pos->second;\n        for (ThreadLocalValues::iterator value_pos =\n            thread_local_values.begin();\n            value_pos != thread_local_values.end();\n            ++value_pos) {\n          value_holders.push_back(value_pos->second);\n     
   }\n        thread_to_thread_locals->erase(thread_local_pos);\n      }\n    }\n    // Outside the lock, let the destructor for 'value_holders' deallocate the\n    // ThreadLocalValueHolderBases.\n  }\n\n private:\n  // In a particular thread, maps a ThreadLocal object to its value.\n  typedef std::map<const ThreadLocalBase*,\n                   linked_ptr<ThreadLocalValueHolderBase> > ThreadLocalValues;\n  // Stores all ThreadIdToThreadLocals having values in a thread, indexed by\n  // thread's ID.\n  typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;\n\n  // Holds the thread id and thread handle that we pass from\n  // StartWatcherThreadFor to WatcherThreadFunc.\n  typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;\n\n  static void StartWatcherThreadFor(DWORD thread_id) {\n    // The returned handle will be kept in thread_map and closed by\n    // watcher_thread in WatcherThreadFunc.\n    HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,\n                                 FALSE,\n                                 thread_id);\n    GTEST_CHECK_(thread != NULL);\n    // We need to to pass a valid thread ID pointer into CreateThread for it\n    // to work correctly under Win98.\n    DWORD watcher_thread_id;\n    HANDLE watcher_thread = ::CreateThread(\n        NULL,   // Default security.\n        0,      // Default stack size\n        &ThreadLocalRegistryImpl::WatcherThreadFunc,\n        reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),\n        CREATE_SUSPENDED,\n        &watcher_thread_id);\n    GTEST_CHECK_(watcher_thread != NULL);\n    // Give the watcher thread the same priority as ours to avoid being\n    // blocked by it.\n    ::SetThreadPriority(watcher_thread,\n                        ::GetThreadPriority(::GetCurrentThread()));\n    ::ResumeThread(watcher_thread);\n    ::CloseHandle(watcher_thread);\n  }\n\n  // Monitors exit from a given thread and notifies those\n  // ThreadIdToThreadLocals about thread 
termination.\n  static DWORD WINAPI WatcherThreadFunc(LPVOID param) {\n    const ThreadIdAndHandle* tah =\n        reinterpret_cast<const ThreadIdAndHandle*>(param);\n    GTEST_CHECK_(\n        ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);\n    OnThreadExit(tah->first);\n    ::CloseHandle(tah->second);\n    delete tah;\n    return 0;\n  }\n\n  // Returns map of thread local instances.\n  static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {\n    mutex_.AssertHeld();\n    static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals;\n    return map;\n  }\n\n  // Protects access to GetThreadLocalsMapLocked() and its return value.\n  static Mutex mutex_;\n  // Protects access to GetThreadMapLocked() and its return value.\n  static Mutex thread_map_mutex_;\n};\n\nMutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex);\nMutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex);\n\nThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(\n      const ThreadLocalBase* thread_local_instance) {\n  return ThreadLocalRegistryImpl::GetValueOnCurrentThread(\n      thread_local_instance);\n}\n\nvoid ThreadLocalRegistry::OnThreadLocalDestroyed(\n      const ThreadLocalBase* thread_local_instance) {\n  ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);\n}\n\n#endif  // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS\n\n#if GTEST_USES_POSIX_RE\n\n// Implements RE.  Currently only needed for death tests.\n\nRE::~RE() {\n  if (is_valid_) {\n    // regfree'ing an invalid regex might crash because the content\n    // of the regex is undefined. 
Since the regex's are essentially\n    // the same, one cannot be valid (or invalid) without the other\n    // being so too.\n    regfree(&partial_regex_);\n    regfree(&full_regex_);\n  }\n  free(const_cast<char*>(pattern_));\n}\n\n// Returns true iff regular expression re matches the entire str.\nbool RE::FullMatch(const char* str, const RE& re) {\n  if (!re.is_valid_) return false;\n\n  regmatch_t match;\n  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;\n}\n\n// Returns true iff regular expression re matches a substring of str\n// (including str itself).\nbool RE::PartialMatch(const char* str, const RE& re) {\n  if (!re.is_valid_) return false;\n\n  regmatch_t match;\n  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;\n}\n\n// Initializes an RE from its string representation.\nvoid RE::Init(const char* regex) {\n  pattern_ = posix::StrDup(regex);\n\n  // Reserves enough bytes to hold the regular expression used for a\n  // full match.\n  const size_t full_regex_len = strlen(regex) + 10;\n  char* const full_pattern = new char[full_regex_len];\n\n  snprintf(full_pattern, full_regex_len, \"^(%s)$\", regex);\n  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;\n  // We want to call regcomp(&partial_regex_, ...) even if the\n  // previous expression returns false.  Otherwise partial_regex_ may\n  // not be properly initialized can may cause trouble when it's\n  // freed.\n  //\n  // Some implementation of POSIX regex (e.g. on at least some\n  // versions of Cygwin) doesn't accept the empty string as a valid\n  // regex.  We change it to an equivalent form \"()\" to be safe.\n  if (is_valid_) {\n    const char* const partial_regex = (*regex == '\\0') ? 
\"()\" : regex;\n    is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;\n  }\n  EXPECT_TRUE(is_valid_)\n      << \"Regular expression \\\"\" << regex\n      << \"\\\" is not a valid POSIX Extended regular expression.\";\n\n  delete[] full_pattern;\n}\n\n#elif GTEST_USES_SIMPLE_RE\n\n// Returns true iff ch appears anywhere in str (excluding the\n// terminating '\\0' character).\nbool IsInSet(char ch, const char* str) {\n  return ch != '\\0' && strchr(str, ch) != NULL;\n}\n\n// Returns true iff ch belongs to the given classification.  Unlike\n// similar functions in <ctype.h>, these aren't affected by the\n// current locale.\nbool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }\nbool IsAsciiPunct(char ch) {\n  return IsInSet(ch, \"^-!\\\"#$%&'()*+,./:;<=>?@[\\\\]_`{|}~\");\n}\nbool IsRepeat(char ch) { return IsInSet(ch, \"?*+\"); }\nbool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, \" \\f\\n\\r\\t\\v\"); }\nbool IsAsciiWordChar(char ch) {\n  return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||\n      ('0' <= ch && ch <= '9') || ch == '_';\n}\n\n// Returns true iff \"\\\\c\" is a supported escape sequence.\nbool IsValidEscape(char c) {\n  return (IsAsciiPunct(c) || IsInSet(c, \"dDfnrsStvwW\"));\n}\n\n// Returns true iff the given atom (specified by escaped and pattern)\n// matches ch.  
The result is undefined if the atom is invalid.\nbool AtomMatchesChar(bool escaped, char pattern_char, char ch) {\n  if (escaped) {  // \"\\\\p\" where p is pattern_char.\n    switch (pattern_char) {\n      case 'd': return IsAsciiDigit(ch);\n      case 'D': return !IsAsciiDigit(ch);\n      case 'f': return ch == '\\f';\n      case 'n': return ch == '\\n';\n      case 'r': return ch == '\\r';\n      case 's': return IsAsciiWhiteSpace(ch);\n      case 'S': return !IsAsciiWhiteSpace(ch);\n      case 't': return ch == '\\t';\n      case 'v': return ch == '\\v';\n      case 'w': return IsAsciiWordChar(ch);\n      case 'W': return !IsAsciiWordChar(ch);\n    }\n    return IsAsciiPunct(pattern_char) && pattern_char == ch;\n  }\n\n  return (pattern_char == '.' && ch != '\\n') || pattern_char == ch;\n}\n\n// Helper function used by ValidateRegex() to format error messages.\nstd::string FormatRegexSyntaxError(const char* regex, int index) {\n  return (Message() << \"Syntax error at index \" << index\n          << \" in simple regular expression \\\"\" << regex << \"\\\": \").GetString();\n}\n\n// Generates non-fatal failures and returns false if regex is invalid;\n// otherwise returns true.\nbool ValidateRegex(const char* regex) {\n  if (regex == NULL) {\n    // TODO(wan@google.com): fix the source file location in the\n    // assertion failures to match where the regex is used in user\n    // code.\n    ADD_FAILURE() << \"NULL is not a valid simple regular expression.\";\n    return false;\n  }\n\n  bool is_valid = true;\n\n  // True iff ?, *, or + can follow the previous atom.\n  bool prev_repeatable = false;\n  for (int i = 0; regex[i]; i++) {\n    if (regex[i] == '\\\\') {  // An escape sequence\n      i++;\n      if (regex[i] == '\\0') {\n        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)\n                      << \"'\\\\' cannot appear at the end.\";\n        return false;\n      }\n\n      if (!IsValidEscape(regex[i])) {\n        ADD_FAILURE() << 
FormatRegexSyntaxError(regex, i - 1)\n                      << \"invalid escape sequence \\\"\\\\\" << regex[i] << \"\\\".\";\n        is_valid = false;\n      }\n      prev_repeatable = true;\n    } else {  // Not an escape sequence.\n      const char ch = regex[i];\n\n      if (ch == '^' && i > 0) {\n        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)\n                      << \"'^' can only appear at the beginning.\";\n        is_valid = false;\n      } else if (ch == '$' && regex[i + 1] != '\\0') {\n        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)\n                      << \"'$' can only appear at the end.\";\n        is_valid = false;\n      } else if (IsInSet(ch, \"()[]{}|\")) {\n        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)\n                      << \"'\" << ch << \"' is unsupported.\";\n        is_valid = false;\n      } else if (IsRepeat(ch) && !prev_repeatable) {\n        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)\n                      << \"'\" << ch << \"' can only follow a repeatable token.\";\n        is_valid = false;\n      }\n\n      prev_repeatable = !IsInSet(ch, \"^$?*+\");\n    }\n  }\n\n  return is_valid;\n}\n\n// Matches a repeated regex atom followed by a valid simple regular\n// expression.  The regex atom is defined as c if escaped is false,\n// or \\c otherwise.  repeat is the repetition meta character (?, *,\n// or +).  The behavior is undefined if str contains too many\n// characters to be indexable by size_t, in which case the test will\n// probably time out anyway.  We are fine with this limitation as\n// std::string has it too.\nbool MatchRepetitionAndRegexAtHead(\n    bool escaped, char c, char repeat, const char* regex,\n    const char* str) {\n  const size_t min_count = (repeat == '+') ? 1 : 0;\n  const size_t max_count = (repeat == '?') ? 
1 :\n      static_cast<size_t>(-1) - 1;\n  // We cannot call numeric_limits::max() as it conflicts with the\n  // max() macro on Windows.\n\n  for (size_t i = 0; i <= max_count; ++i) {\n    // We know that the atom matches each of the first i characters in str.\n    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {\n      // We have enough matches at the head, and the tail matches too.\n      // Since we only care about *whether* the pattern matches str\n      // (as opposed to *how* it matches), there is no need to find a\n      // greedy match.\n      return true;\n    }\n    if (str[i] == '\\0' || !AtomMatchesChar(escaped, c, str[i]))\n      return false;\n  }\n  return false;\n}\n\n// Returns true iff regex matches a prefix of str.  regex must be a\n// valid simple regular expression and not start with \"^\", or the\n// result is undefined.\nbool MatchRegexAtHead(const char* regex, const char* str) {\n  if (*regex == '\\0')  // An empty regex matches a prefix of anything.\n    return true;\n\n  // \"$\" only matches the end of a string.  Note that regex being\n  // valid guarantees that there's nothing after \"$\" in it.\n  if (*regex == '$')\n    return *str == '\\0';\n\n  // Is the first thing in regex an escape sequence?\n  const bool escaped = *regex == '\\\\';\n  if (escaped)\n    ++regex;\n  if (IsRepeat(regex[1])) {\n    // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so\n    // here's an indirect recursion.  It terminates as the regex gets\n    // shorter in each recursion.\n    return MatchRepetitionAndRegexAtHead(\n        escaped, regex[0], regex[1], regex + 2, str);\n  } else {\n    // regex isn't empty, isn't \"$\", and doesn't start with a\n    // repetition.  
We match the first atom of regex with the first\n    // character of str and recurse.\n    return (*str != '\\0') && AtomMatchesChar(escaped, *regex, *str) &&\n        MatchRegexAtHead(regex + 1, str + 1);\n  }\n}\n\n// Returns true iff regex matches any substring of str.  regex must be\n// a valid simple regular expression, or the result is undefined.\n//\n// The algorithm is recursive, but the recursion depth doesn't exceed\n// the regex length, so we won't need to worry about running out of\n// stack space normally.  In rare cases the time complexity can be\n// exponential with respect to the regex length + the string length,\n// but usually it's must faster (often close to linear).\nbool MatchRegexAnywhere(const char* regex, const char* str) {\n  if (regex == NULL || str == NULL)\n    return false;\n\n  if (*regex == '^')\n    return MatchRegexAtHead(regex + 1, str);\n\n  // A successful match can be anywhere in str.\n  do {\n    if (MatchRegexAtHead(regex, str))\n      return true;\n  } while (*str++ != '\\0');\n  return false;\n}\n\n// Implements the RE class.\n\nRE::~RE() {\n  free(const_cast<char*>(pattern_));\n  free(const_cast<char*>(full_pattern_));\n}\n\n// Returns true iff regular expression re matches the entire str.\nbool RE::FullMatch(const char* str, const RE& re) {\n  return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);\n}\n\n// Returns true iff regular expression re matches a substring of str\n// (including str itself).\nbool RE::PartialMatch(const char* str, const RE& re) {\n  return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);\n}\n\n// Initializes an RE from its string representation.\nvoid RE::Init(const char* regex) {\n  pattern_ = full_pattern_ = NULL;\n  if (regex != NULL) {\n    pattern_ = posix::StrDup(regex);\n  }\n\n  is_valid_ = ValidateRegex(regex);\n  if (!is_valid_) {\n    // No need to calculate the full pattern when the regex is invalid.\n    return;\n  }\n\n  const size_t len = strlen(regex);\n  // Reserves 
enough bytes to hold the regular expression used for a\n  // full match: we need space to prepend a '^', append a '$', and\n  // terminate the string with '\\0'.\n  char* buffer = static_cast<char*>(malloc(len + 3));\n  full_pattern_ = buffer;\n\n  if (*regex != '^')\n    *buffer++ = '^';  // Makes sure full_pattern_ starts with '^'.\n\n  // We don't use snprintf or strncpy, as they trigger a warning when\n  // compiled with VC++ 8.0.\n  memcpy(buffer, regex, len);\n  buffer += len;\n\n  if (len == 0 || regex[len - 1] != '$')\n    *buffer++ = '$';  // Makes sure full_pattern_ ends with '$'.\n\n  *buffer = '\\0';\n}\n\n#endif  // GTEST_USES_POSIX_RE\n\nconst char kUnknownFile[] = \"unknown file\";\n\n// Formats a source file path and a line number as they would appear\n// in an error message from the compiler used to compile this code.\nGTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {\n  const std::string file_name(file == NULL ? kUnknownFile : file);\n\n  if (line < 0) {\n    return file_name + \":\";\n  }\n#ifdef _MSC_VER\n  return file_name + \"(\" + StreamableToString(line) + \"):\";\n#else\n  return file_name + \":\" + StreamableToString(line) + \":\";\n#endif  // _MSC_VER\n}\n\n// Formats a file location for compiler-independent XML output.\n// Although this function is not platform dependent, we put it next to\n// FormatFileLocation in order to contrast the two functions.\n// Note that FormatCompilerIndependentFileLocation() does NOT append colon\n// to the file location it produces, unlike FormatFileLocation().\nGTEST_API_ ::std::string FormatCompilerIndependentFileLocation(\n    const char* file, int line) {\n  const std::string file_name(file == NULL ? 
kUnknownFile : file);\n\n  if (line < 0)\n    return file_name;\n  else\n    return file_name + \":\" + StreamableToString(line);\n}\n\nGTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)\n    : severity_(severity) {\n  const char* const marker =\n      severity == GTEST_INFO ?    \"[  INFO ]\" :\n      severity == GTEST_WARNING ? \"[WARNING]\" :\n      severity == GTEST_ERROR ?   \"[ ERROR ]\" : \"[ FATAL ]\";\n  GetStream() << ::std::endl << marker << \" \"\n              << FormatFileLocation(file, line).c_str() << \": \";\n}\n\n// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.\nGTestLog::~GTestLog() {\n  GetStream() << ::std::endl;\n  if (severity_ == GTEST_FATAL) {\n    fflush(stderr);\n    posix::Abort();\n  }\n}\n// Disable Microsoft deprecation warnings for POSIX functions called from\n// this class (creat, dup, dup2, and close)\nGTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)\n\n#if GTEST_HAS_STREAM_REDIRECTION\n\n// Object that captures an output stream (stdout/stderr).\nclass CapturedStream {\n public:\n  // The ctor redirects the stream to a temporary file.\n  explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {\n# if GTEST_OS_WINDOWS\n    char temp_dir_path[MAX_PATH + 1] = { '\\0' };  // NOLINT\n    char temp_file_path[MAX_PATH + 1] = { '\\0' };  // NOLINT\n\n    ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);\n    const UINT success = ::GetTempFileNameA(temp_dir_path,\n                                            \"gtest_redir\",\n                                            0,  // Generate unique file name.\n                                            temp_file_path);\n    GTEST_CHECK_(success != 0)\n        << \"Unable to create a temporary file in \" << temp_dir_path;\n    const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);\n    GTEST_CHECK_(captured_fd != -1) << \"Unable to open temporary file \"\n                                    << temp_file_path;\n    filename_ = 
temp_file_path;\n# else\n    // There's no guarantee that a test has write access to the current\n    // directory, so we create the temporary file in the /tmp directory\n    // instead. We use /tmp on most systems, and /sdcard on Android.\n    // That's because Android doesn't have /tmp.\n#  if GTEST_OS_LINUX_ANDROID\n    // Note: Android applications are expected to call the framework's\n    // Context.getExternalStorageDirectory() method through JNI to get\n    // the location of the world-writable SD Card directory. However,\n    // this requires a Context handle, which cannot be retrieved\n    // globally from native code. Doing so also precludes running the\n    // code as part of a regular standalone executable, which doesn't\n    // run in a Dalvik process (e.g. when running it through 'adb shell').\n    //\n    // The location /sdcard is directly accessible from native code\n    // and is the only location (unofficially) supported by the Android\n    // team. It's generally a symlink to the real SD Card mount point\n    // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or\n    // other OEM-customized locations. 
Never rely on these, and always\n    // use /sdcard.\n    char name_template[] = \"/sdcard/gtest_captured_stream.XXXXXX\";\n#  else\n    char name_template[] = \"/tmp/captured_stream.XXXXXX\";\n#  endif  // GTEST_OS_LINUX_ANDROID\n    const int captured_fd = mkstemp(name_template);\n    filename_ = name_template;\n# endif  // GTEST_OS_WINDOWS\n    fflush(NULL);\n    dup2(captured_fd, fd_);\n    close(captured_fd);\n  }\n\n  ~CapturedStream() {\n    remove(filename_.c_str());\n  }\n\n  std::string GetCapturedString() {\n    if (uncaptured_fd_ != -1) {\n      // Restores the original stream.\n      fflush(NULL);\n      dup2(uncaptured_fd_, fd_);\n      close(uncaptured_fd_);\n      uncaptured_fd_ = -1;\n    }\n\n    FILE* const file = posix::FOpen(filename_.c_str(), \"r\");\n    const std::string content = ReadEntireFile(file);\n    posix::FClose(file);\n    return content;\n  }\n\n private:\n  const int fd_;  // A stream to capture.\n  int uncaptured_fd_;\n  // Name of the temporary file holding the stderr output.\n  ::std::string filename_;\n\n  GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);\n};\n\nGTEST_DISABLE_MSC_WARNINGS_POP_()\n\nstatic CapturedStream* g_captured_stderr = NULL;\nstatic CapturedStream* g_captured_stdout = NULL;\n\n// Starts capturing an output stream (stdout/stderr).\nvoid CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {\n  if (*stream != NULL) {\n    GTEST_LOG_(FATAL) << \"Only one \" << stream_name\n                      << \" capturer can exist at a time.\";\n  }\n  *stream = new CapturedStream(fd);\n}\n\n// Stops capturing the output stream and returns the captured string.\nstd::string GetCapturedStream(CapturedStream** captured_stream) {\n  const std::string content = (*captured_stream)->GetCapturedString();\n\n  delete *captured_stream;\n  *captured_stream = NULL;\n\n  return content;\n}\n\n// Starts capturing stdout.\nvoid CaptureStdout() {\n  CaptureStream(kStdOutFileno, \"stdout\", 
&g_captured_stdout);\n}\n\n// Starts capturing stderr.\nvoid CaptureStderr() {\n  CaptureStream(kStdErrFileno, \"stderr\", &g_captured_stderr);\n}\n\n// Stops capturing stdout and returns the captured string.\nstd::string GetCapturedStdout() {\n  return GetCapturedStream(&g_captured_stdout);\n}\n\n// Stops capturing stderr and returns the captured string.\nstd::string GetCapturedStderr() {\n  return GetCapturedStream(&g_captured_stderr);\n}\n\n#endif  // GTEST_HAS_STREAM_REDIRECTION\n\nstd::string TempDir() {\n#if GTEST_OS_WINDOWS_MOBILE\n  return \"\\\\temp\\\\\";\n#elif GTEST_OS_WINDOWS\n  const char* temp_dir = posix::GetEnv(\"TEMP\");\n  if (temp_dir == NULL || temp_dir[0] == '\\0')\n    return \"\\\\temp\\\\\";\n  else if (temp_dir[strlen(temp_dir) - 1] == '\\\\')\n    return temp_dir;\n  else\n    return std::string(temp_dir) + \"\\\\\";\n#elif GTEST_OS_LINUX_ANDROID\n  return \"/sdcard/\";\n#else\n  return \"/tmp/\";\n#endif  // GTEST_OS_WINDOWS_MOBILE\n}\n\nsize_t GetFileSize(FILE* file) {\n  fseek(file, 0, SEEK_END);\n  return static_cast<size_t>(ftell(file));\n}\n\nstd::string ReadEntireFile(FILE* file) {\n  const size_t file_size = GetFileSize(file);\n  char* const buffer = new char[file_size];\n\n  size_t bytes_last_read = 0;  // # of bytes read in the last fread()\n  size_t bytes_read = 0;       // # of bytes read so far\n\n  fseek(file, 0, SEEK_SET);\n\n  // Keeps reading the file until we cannot read further or the\n  // pre-determined file size is reached.\n  do {\n    bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);\n    bytes_read += bytes_last_read;\n  } while (bytes_last_read > 0 && bytes_read < file_size);\n\n  const std::string content(buffer, bytes_read);\n  delete[] buffer;\n\n  return content;\n}\n\n#if GTEST_HAS_DEATH_TEST\n\nstatic const ::std::vector<testing::internal::string>* g_injected_test_argvs =\n                                        NULL;  // Owned.\n\nvoid SetInjectableArgvs(const 
::std::vector<testing::internal::string>* argvs) {\n  if (g_injected_test_argvs != argvs)\n    delete g_injected_test_argvs;\n  g_injected_test_argvs = argvs;\n}\n\nconst ::std::vector<testing::internal::string>& GetInjectableArgvs() {\n  if (g_injected_test_argvs != NULL) {\n    return *g_injected_test_argvs;\n  }\n  return GetArgvs();\n}\n#endif  // GTEST_HAS_DEATH_TEST\n\n#if GTEST_OS_WINDOWS_MOBILE\nnamespace posix {\nvoid Abort() {\n  DebugBreak();\n  TerminateProcess(GetCurrentProcess(), 1);\n}\n}  // namespace posix\n#endif  // GTEST_OS_WINDOWS_MOBILE\n\n// Returns the name of the environment variable corresponding to the\n// given flag.  For example, FlagToEnvVar(\"foo\") will return\n// \"GTEST_FOO\" in the open-source version.\nstatic std::string FlagToEnvVar(const char* flag) {\n  const std::string full_flag =\n      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();\n\n  Message env_var;\n  for (size_t i = 0; i != full_flag.length(); i++) {\n    env_var << ToUpper(full_flag.c_str()[i]);\n  }\n\n  return env_var.GetString();\n}\n\n// Parses 'str' for a 32-bit signed integer.  
If successful, writes\n// the result to *value and returns true; otherwise leaves *value\n// unchanged and returns false.\nbool ParseInt32(const Message& src_text, const char* str, Int32* value) {\n  // Parses the environment variable as a decimal integer.\n  char* end = NULL;\n  const long long_value = strtol(str, &end, 10);  // NOLINT\n\n  // Has strtol() consumed all characters in the string?\n  if (*end != '\\0') {\n    // No - an invalid character was encountered.\n    Message msg;\n    msg << \"WARNING: \" << src_text\n        << \" is expected to be a 32-bit integer, but actually\"\n        << \" has value \\\"\" << str << \"\\\".\\n\";\n    printf(\"%s\", msg.GetString().c_str());\n    fflush(stdout);\n    return false;\n  }\n\n  // Is the parsed value in the range of an Int32?\n  const Int32 result = static_cast<Int32>(long_value);\n  if (long_value == LONG_MAX || long_value == LONG_MIN ||\n      // The parsed value overflows as a long.  (strtol() returns\n      // LONG_MAX or LONG_MIN when the input overflows.)\n      result != long_value\n      // The parsed value overflows as an Int32.\n      ) {\n    Message msg;\n    msg << \"WARNING: \" << src_text\n        << \" is expected to be a 32-bit integer, but actually\"\n        << \" has value \" << str << \", which overflows.\\n\";\n    printf(\"%s\", msg.GetString().c_str());\n    fflush(stdout);\n    return false;\n  }\n\n  *value = result;\n  return true;\n}\n\n// Reads and returns the Boolean environment variable corresponding to\n// the given flag; if it's not set, returns default_value.\n//\n// The value is considered true iff it's not \"0\".\nbool BoolFromGTestEnv(const char* flag, bool default_value) {\n#if defined(GTEST_GET_BOOL_FROM_ENV_)\n  return GTEST_GET_BOOL_FROM_ENV_(flag, default_value);\n#endif  // defined(GTEST_GET_BOOL_FROM_ENV_)\n  const std::string env_var = FlagToEnvVar(flag);\n  const char* const string_value = posix::GetEnv(env_var.c_str());\n  return string_value == NULL ?\n      
default_value : strcmp(string_value, \"0\") != 0;\n}\n\n// Reads and returns a 32-bit integer stored in the environment\n// variable corresponding to the given flag; if it isn't set or\n// doesn't represent a valid 32-bit integer, returns default_value.\nInt32 Int32FromGTestEnv(const char* flag, Int32 default_value) {\n#if defined(GTEST_GET_INT32_FROM_ENV_)\n  return GTEST_GET_INT32_FROM_ENV_(flag, default_value);\n#endif  // defined(GTEST_GET_INT32_FROM_ENV_)\n  const std::string env_var = FlagToEnvVar(flag);\n  const char* const string_value = posix::GetEnv(env_var.c_str());\n  if (string_value == NULL) {\n    // The environment variable is not set.\n    return default_value;\n  }\n\n  Int32 result = default_value;\n  if (!ParseInt32(Message() << \"Environment variable \" << env_var,\n                  string_value, &result)) {\n    printf(\"The default value %s is used.\\n\",\n           (Message() << default_value).GetString().c_str());\n    fflush(stdout);\n    return default_value;\n  }\n\n  return result;\n}\n\n// Reads and returns the string environment variable corresponding to\n// the given flag; if it's not set, returns default_value.\nconst char* StringFromGTestEnv(const char* flag, const char* default_value) {\n#if defined(GTEST_GET_STRING_FROM_ENV_)\n  return GTEST_GET_STRING_FROM_ENV_(flag, default_value);\n#endif  // defined(GTEST_GET_STRING_FROM_ENV_)\n  const std::string env_var = FlagToEnvVar(flag);\n  const char* const value = posix::GetEnv(env_var.c_str());\n  return value == NULL ? 
default_value : value;\n}\n\n}  // namespace internal\n}  // namespace testing\n// Copyright 2007, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n// Google Test - The Google C++ Testing Framework\n//\n// This file implements a universal value printer that can print a\n// value of any type T:\n//\n//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);\n//\n// It uses the << operator when possible, and prints the bytes in the\n// object otherwise.  A user can override its behavior for a class\n// type Foo by defining either operator<<(::std::ostream&, const Foo&)\n// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that\n// defines Foo.\n\n#include <ctype.h>\n#include <stdio.h>\n#include <cwchar>\n#include <ostream>  // NOLINT\n#include <string>\n\nnamespace testing {\n\nnamespace {\n\nusing ::std::ostream;\n\n// Prints a segment of bytes in the given object.\nGTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_\nGTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\nGTEST_ATTRIBUTE_NO_SANITIZE_THREAD_\nvoid PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,\n                                size_t count, ostream* os) {\n  char text[5] = \"\";\n  for (size_t i = 0; i != count; i++) {\n    const size_t j = start + i;\n    if (i != 0) {\n      // Organizes the bytes into groups of 2 for easy parsing by\n      // human.\n      if ((j % 2) == 0)\n        *os << ' ';\n      else\n        *os << '-';\n    }\n    GTEST_SNPRINTF_(text, sizeof(text), \"%02X\", obj_bytes[j]);\n    *os << text;\n  }\n}\n\n// Prints 
the bytes in the given value to the given ostream.\nvoid PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,\n                              ostream* os) {\n  // Tells the user how big the object is.\n  *os << count << \"-byte object <\";\n\n  const size_t kThreshold = 132;\n  const size_t kChunkSize = 64;\n  // If the object size is bigger than kThreshold, we'll have to omit\n  // some details by printing only the first and the last kChunkSize\n  // bytes.\n  // TODO(wan): let the user control the threshold using a flag.\n  if (count < kThreshold) {\n    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);\n  } else {\n    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);\n    *os << \" ... \";\n    // Rounds up to 2-byte boundary.\n    const size_t resume_pos = (count - kChunkSize + 1)/2*2;\n    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);\n  }\n  *os << \">\";\n}\n\n}  // namespace\n\nnamespace internal2 {\n\n// Delegates to PrintBytesInObjectToImpl() to print the bytes in the\n// given object.  The delegation simplifies the implementation, which\n// uses the << operator and thus is easier done outside of the\n// ::testing::internal namespace, which contains a << operator that\n// sometimes conflicts with the one in STL.\nvoid PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,\n                          ostream* os) {\n  PrintBytesInObjectToImpl(obj_bytes, count, os);\n}\n\n}  // namespace internal2\n\nnamespace internal {\n\n// Depending on the value of a char (or wchar_t), we print it in one\n// of three formats:\n//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),\n//   - as a hexidecimal escape sequence (e.g. '\\x7F'), or\n//   - as a special escape sequence (e.g. '\\r', '\\n').\nenum CharFormat {\n  kAsIs,\n  kHexEscape,\n  kSpecialEscape\n};\n\n// Returns true if c is a printable ASCII character.  
We test the\n// value of c directly instead of calling isprint(), which is buggy on\n// Windows Mobile.\ninline bool IsPrintableAscii(wchar_t c) {\n  return 0x20 <= c && c <= 0x7E;\n}\n\n// Prints a wide or narrow char c as a character literal without the\n// quotes, escaping it when necessary; returns how c was formatted.\n// The template argument UnsignedChar is the unsigned version of Char,\n// which is the type of c.\ntemplate <typename UnsignedChar, typename Char>\nstatic CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {\n  switch (static_cast<wchar_t>(c)) {\n    case L'\\0':\n      *os << \"\\\\0\";\n      break;\n    case L'\\'':\n      *os << \"\\\\'\";\n      break;\n    case L'\\\\':\n      *os << \"\\\\\\\\\";\n      break;\n    case L'\\a':\n      *os << \"\\\\a\";\n      break;\n    case L'\\b':\n      *os << \"\\\\b\";\n      break;\n    case L'\\f':\n      *os << \"\\\\f\";\n      break;\n    case L'\\n':\n      *os << \"\\\\n\";\n      break;\n    case L'\\r':\n      *os << \"\\\\r\";\n      break;\n    case L'\\t':\n      *os << \"\\\\t\";\n      break;\n    case L'\\v':\n      *os << \"\\\\v\";\n      break;\n    default:\n      if (IsPrintableAscii(c)) {\n        *os << static_cast<char>(c);\n        return kAsIs;\n      } else {\n        *os << \"\\\\x\" + String::FormatHexInt(static_cast<UnsignedChar>(c));\n        return kHexEscape;\n      }\n  }\n  return kSpecialEscape;\n}\n\n// Prints a wchar_t c as if it's part of a string literal, escaping it when\n// necessary; returns how c was formatted.\nstatic CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {\n  switch (c) {\n    case L'\\'':\n      *os << \"'\";\n      return kAsIs;\n    case L'\"':\n      *os << \"\\\\\\\"\";\n      return kSpecialEscape;\n    default:\n      return PrintAsCharLiteralTo<wchar_t>(c, os);\n  }\n}\n\n// Prints a char c as if it's part of a string literal, escaping it when\n// necessary; returns how c was formatted.\nstatic CharFormat 
PrintAsStringLiteralTo(char c, ostream* os) {\n  return PrintAsStringLiteralTo(\n      static_cast<wchar_t>(static_cast<unsigned char>(c)), os);\n}\n\n// Prints a wide or narrow character c and its code.  '\\0' is printed\n// as \"'\\\\0'\", other unprintable characters are also properly escaped\n// using the standard C++ escape sequence.  The template argument\n// UnsignedChar is the unsigned version of Char, which is the type of c.\ntemplate <typename UnsignedChar, typename Char>\nvoid PrintCharAndCodeTo(Char c, ostream* os) {\n  // First, print c as a literal in the most readable form we can find.\n  *os << ((sizeof(c) > 1) ? \"L'\" : \"'\");\n  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);\n  *os << \"'\";\n\n  // To aid user debugging, we also print c's code in decimal, unless\n  // it's 0 (in which case c was printed as '\\\\0', making the code\n  // obvious).\n  if (c == 0)\n    return;\n  *os << \" (\" << static_cast<int>(c);\n\n  // For more convenience, we print c's code again in hexidecimal,\n  // unless c was already printed in the form '\\x##' or the code is in\n  // [1, 9].\n  if (format == kHexEscape || (1 <= c && c <= 9)) {\n    // Do nothing.\n  } else {\n    *os << \", 0x\" << String::FormatHexInt(static_cast<UnsignedChar>(c));\n  }\n  *os << \")\";\n}\n\nvoid PrintTo(unsigned char c, ::std::ostream* os) {\n  PrintCharAndCodeTo<unsigned char>(c, os);\n}\nvoid PrintTo(signed char c, ::std::ostream* os) {\n  PrintCharAndCodeTo<unsigned char>(c, os);\n}\n\n// Prints a wchar_t as a symbol if it is printable or as its internal\n// code otherwise and also as its code.  L'\\0' is printed as \"L'\\\\0'\".\nvoid PrintTo(wchar_t wc, ostream* os) {\n  PrintCharAndCodeTo<wchar_t>(wc, os);\n}\n\n// Prints the given array of characters to the ostream.  
CharType must be either\n// char or wchar_t.\n// The array starts at begin, the length is len, it may include '\\0' characters\n// and may not be NUL-terminated.\ntemplate <typename CharType>\nGTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_\nGTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\nGTEST_ATTRIBUTE_NO_SANITIZE_THREAD_\nstatic void PrintCharsAsStringTo(\n    const CharType* begin, size_t len, ostream* os) {\n  const char* const kQuoteBegin = sizeof(CharType) == 1 ? \"\\\"\" : \"L\\\"\";\n  *os << kQuoteBegin;\n  bool is_previous_hex = false;\n  for (size_t index = 0; index < len; ++index) {\n    const CharType cur = begin[index];\n    if (is_previous_hex && IsXDigit(cur)) {\n      // Previous character is of '\\x..' form and this character can be\n      // interpreted as another hexadecimal digit in its number. Break string to\n      // disambiguate.\n      *os << \"\\\" \" << kQuoteBegin;\n    }\n    is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;\n  }\n  *os << \"\\\"\";\n}\n\n// Prints a (const) char/wchar_t array of 'len' elements, starting at address\n// 'begin'.  CharType must be either char or wchar_t.\ntemplate <typename CharType>\nGTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_\nGTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_\nGTEST_ATTRIBUTE_NO_SANITIZE_THREAD_\nstatic void UniversalPrintCharArray(\n    const CharType* begin, size_t len, ostream* os) {\n  // The code\n  //   const char kFoo[] = \"foo\";\n  // generates an array of 4, not 3, elements, with the last one being '\\0'.\n  //\n  // Therefore when printing a char array, we don't print the last element if\n  // it's '\\0', such that the output matches the string literal as it's\n  // written in the source code.\n  if (len > 0 && begin[len - 1] == '\\0') {\n    PrintCharsAsStringTo(begin, len - 1, os);\n    return;\n  }\n\n  // If, however, the last element in the array is not '\\0', e.g.\n  //    const char kFoo[] = { 'f', 'o', 'o' };\n  // we must print the entire array.  
We also print a message to indicate\n  // that the array is not NUL-terminated.\n  PrintCharsAsStringTo(begin, len, os);\n  *os << \" (no terminating NUL)\";\n}\n\n// Prints a (const) char array of 'len' elements, starting at address 'begin'.\nvoid UniversalPrintArray(const char* begin, size_t len, ostream* os) {\n  UniversalPrintCharArray(begin, len, os);\n}\n\n// Prints a (const) wchar_t array of 'len' elements, starting at address\n// 'begin'.\nvoid UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) {\n  UniversalPrintCharArray(begin, len, os);\n}\n\n// Prints the given C string to the ostream.\nvoid PrintTo(const char* s, ostream* os) {\n  if (s == NULL) {\n    *os << \"NULL\";\n  } else {\n    *os << ImplicitCast_<const void*>(s) << \" pointing to \";\n    PrintCharsAsStringTo(s, strlen(s), os);\n  }\n}\n\n// MSVC compiler can be configured to define whar_t as a typedef\n// of unsigned short. Defining an overload for const wchar_t* in that case\n// would cause pointers to unsigned shorts be printed as wide strings,\n// possibly accessing more memory than intended and causing invalid\n// memory accesses. 
MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when\n// wchar_t is implemented as a native type.\n#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)\n// Prints the given wide C string to the ostream.\nvoid PrintTo(const wchar_t* s, ostream* os) {\n  if (s == NULL) {\n    *os << \"NULL\";\n  } else {\n    *os << ImplicitCast_<const void*>(s) << \" pointing to \";\n    PrintCharsAsStringTo(s, std::wcslen(s), os);\n  }\n}\n#endif  // wchar_t is native\n\n// Prints a ::string object.\n#if GTEST_HAS_GLOBAL_STRING\nvoid PrintStringTo(const ::string& s, ostream* os) {\n  PrintCharsAsStringTo(s.data(), s.size(), os);\n}\n#endif  // GTEST_HAS_GLOBAL_STRING\n\nvoid PrintStringTo(const ::std::string& s, ostream* os) {\n  PrintCharsAsStringTo(s.data(), s.size(), os);\n}\n\n// Prints a ::wstring object.\n#if GTEST_HAS_GLOBAL_WSTRING\nvoid PrintWideStringTo(const ::wstring& s, ostream* os) {\n  PrintCharsAsStringTo(s.data(), s.size(), os);\n}\n#endif  // GTEST_HAS_GLOBAL_WSTRING\n\n#if GTEST_HAS_STD_WSTRING\nvoid PrintWideStringTo(const ::std::wstring& s, ostream* os) {\n  PrintCharsAsStringTo(s.data(), s.size(), os);\n}\n#endif  // GTEST_HAS_STD_WSTRING\n\n}  // namespace internal\n\n}  // namespace testing\n// Copyright 2008, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. 
nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: mheule@google.com (Markus Heule)\n//\n// The Google C++ Testing Framework (Google Test)\n\n\n// Indicates that this translation unit is part of Google Test's\n// implementation.  It must come before gtest-internal-inl.h is\n// included, or there will be a compiler error.  This trick exists to\n// prevent the accidental inclusion of gtest-internal-inl.h in the\n// user's code.\n#define GTEST_IMPLEMENTATION_ 1\n#undef GTEST_IMPLEMENTATION_\n\nnamespace testing {\n\nusing internal::GetUnitTestImpl;\n\n// Gets the summary of the failure message by omitting the stack trace\n// in it.\nstd::string TestPartResult::ExtractSummary(const char* message) {\n  const char* const stack_trace = strstr(message, internal::kStackTraceMarker);\n  return stack_trace == NULL ? 
message :\n      std::string(message, stack_trace);\n}\n\n// Prints a TestPartResult object.\nstd::ostream& operator<<(std::ostream& os, const TestPartResult& result) {\n  return os\n      << result.file_name() << \":\" << result.line_number() << \": \"\n      << (result.type() == TestPartResult::kSuccess ? \"Success\" :\n          result.type() == TestPartResult::kFatalFailure ? \"Fatal failure\" :\n          \"Non-fatal failure\") << \":\\n\"\n      << result.message() << std::endl;\n}\n\n// Appends a TestPartResult to the array.\nvoid TestPartResultArray::Append(const TestPartResult& result) {\n  array_.push_back(result);\n}\n\n// Returns the TestPartResult at the given index (0-based).\nconst TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {\n  if (index < 0 || index >= size()) {\n    printf(\"\\nInvalid index (%d) into TestPartResultArray.\\n\", index);\n    internal::posix::Abort();\n  }\n\n  return array_[index];\n}\n\n// Returns the number of TestPartResult objects in the array.\nint TestPartResultArray::size() const {\n  return static_cast<int>(array_.size());\n}\n\nnamespace internal {\n\nHasNewFatalFailureHelper::HasNewFatalFailureHelper()\n    : has_new_fatal_failure_(false),\n      original_reporter_(GetUnitTestImpl()->\n                         GetTestPartResultReporterForCurrentThread()) {\n  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);\n}\n\nHasNewFatalFailureHelper::~HasNewFatalFailureHelper() {\n  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(\n      original_reporter_);\n}\n\nvoid HasNewFatalFailureHelper::ReportTestPartResult(\n    const TestPartResult& result) {\n  if (result.fatally_failed())\n    has_new_fatal_failure_ = true;\n  original_reporter_->ReportTestPartResult(result);\n}\n\n}  // namespace internal\n\n}  // namespace testing\n// Copyright 2008 Google Inc.\n// All Rights Reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, 
are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Author: wan@google.com (Zhanyong Wan)\n\n\nnamespace testing {\nnamespace internal {\n\n#if GTEST_HAS_TYPED_TEST_P\n\n// Skips to the first non-space char in str. 
Returns an empty string if str\n// contains only whitespace characters.\nstatic const char* SkipSpaces(const char* str) {\n  while (IsSpace(*str))\n    str++;\n  return str;\n}\n\nstatic std::vector<std::string> SplitIntoTestNames(const char* src) {\n  std::vector<std::string> name_vec;\n  src = SkipSpaces(src);\n  for (; src != NULL; src = SkipComma(src)) {\n    name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));\n  }\n  return name_vec;\n}\n\n// Verifies that registered_tests match the test names in\n// registered_tests_; returns registered_tests if successful, or\n// aborts the program otherwise.\nconst char* TypedTestCasePState::VerifyRegisteredTestNames(\n    const char* file, int line, const char* registered_tests) {\n  typedef RegisteredTestsMap::const_iterator RegisteredTestIter;\n  registered_ = true;\n\n  std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);\n\n  Message errors;\n\n  std::set<std::string> tests;\n  for (std::vector<std::string>::const_iterator name_it = name_vec.begin();\n       name_it != name_vec.end(); ++name_it) {\n    const std::string& name = *name_it;\n    if (tests.count(name) != 0) {\n      errors << \"Test \" << name << \" is listed more than once.\\n\";\n      continue;\n    }\n\n    bool found = false;\n    for (RegisteredTestIter it = registered_tests_.begin();\n         it != registered_tests_.end();\n         ++it) {\n      if (name == it->first) {\n        found = true;\n        break;\n      }\n    }\n\n    if (found) {\n      tests.insert(name);\n    } else {\n      errors << \"No test named \" << name\n             << \" can be found in this test case.\\n\";\n    }\n  }\n\n  for (RegisteredTestIter it = registered_tests_.begin();\n       it != registered_tests_.end();\n       ++it) {\n    if (tests.count(it->first) == 0) {\n      errors << \"You forgot to list test \" << it->first << \".\\n\";\n    }\n  }\n\n  const std::string& errors_str = errors.GetString();\n  if (errors_str != 
\"\") {\n    fprintf(stderr, \"%s %s\", FormatFileLocation(file, line).c_str(),\n            errors_str.c_str());\n    fflush(stderr);\n    posix::Abort();\n  }\n\n  return registered_tests;\n}\n\n#endif  // GTEST_HAS_TYPED_TEST_P\n\n}  // namespace internal\n}  // namespace testing\n"
  },
  {
    "path": "libs/gtest_mpi/include/gtest_mpi/gtest_mpi.hpp",
    "content": "// This project contains source code from the Googletest framework\n// obtained from https://github.com/google/googletest with the following\n// terms:\n//\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// ---------------------------------------------------------------------\n//\n// Modifications and additions are published under the following terms:\n//\n// Copyright 2019, Simon Frasch\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of the copyright holder nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// ---------------------------------------------------------------------\n\n#ifndef GUARD_GTEST_MPI_HPP\n#define GUARD_GTEST_MPI_HPP\n#include <gtest/gtest.h>\n#include <mpi.h>\n#include <unistd.h>\n#include <set>\n#include <string>\n#include \"gtest_mpi_internal.hpp\"\n\nnamespace gtest_mpi {\nnamespace {  // no external linkage\n\nclass MPITestEnvironment : public ::testing::Environment {\npublic:\n  MPITestEnvironment() : ::testing::Environment() {}\n\n  MPITestEnvironment(const MPITestEnvironment&) = delete;\n\n  MPITestEnvironment(MPITestEnvironment&&) = default;\n\n  static MPI_Comm GetComm() { return global_test_comm; }\n\n  void SetUp() override {\n    if (global_test_comm != MPI_COMM_WORLD) {\n      MPI_Comm_free(&global_test_comm);\n      global_test_comm = MPI_COMM_WORLD;\n    }\n    MPI_Comm_dup(MPI_COMM_WORLD, &global_test_comm);\n  }\n\n  void TearDown() override {\n    if (global_test_comm != MPI_COMM_WORLD) {\n      MPI_Comm_free(&global_test_comm);\n      global_test_comm = MPI_COMM_WORLD;\n    }\n  }\n\nprivate:\n  static MPI_Comm global_test_comm;\n};\nMPI_Comm MPITestEnvironment::global_test_comm = MPI_COMM_WORLD;\n\nclass PrettyMPIUnitTestResultPrinter : public ::testing::TestEventListener {\npublic:\n  PrettyMPIUnitTestResultPrinter()\n      : rank_(0),\n        comm_size_(1),\n        comm_(MPITestEnvironment::GetComm()),\n        num_sucessfull_tests_(0),\n        num_failed_tests_(0) {\n    
MPI_Comm_rank(comm_, &rank_);\n    MPI_Comm_size(comm_, &comm_size_);\n  }\n\n  // The following methods override what's in the TestEventListener class.\n  void OnTestIterationStart(const ::testing::UnitTest& unit_test, int iteration) override;\n  void OnEnvironmentsSetUpStart(const ::testing::UnitTest& unit_test) override;\n  void OnTestCaseStart(const ::testing::TestCase& test_case) override;\n  void OnTestStart(const ::testing::TestInfo& test_info) override;\n  void OnTestPartResult(const ::testing::TestPartResult& result) override;\n  void OnTestEnd(const ::testing::TestInfo& test_info) override;\n  void OnTestCaseEnd(const ::testing::TestCase& test_case) override;\n  void OnEnvironmentsTearDownStart(const ::testing::UnitTest& unit_test) override;\n  void OnTestIterationEnd(const ::testing::UnitTest& unit_test, int iteration) override;\n\n  void OnEnvironmentsSetUpEnd(const ::testing::UnitTest& /*unit_test*/) override {}\n  void OnEnvironmentsTearDownEnd(const ::testing::UnitTest& /*unit_test*/) override {}\n  void OnTestProgramStart(const ::testing::UnitTest& /*unit_test*/) override {}\n  void OnTestProgramEnd(const ::testing::UnitTest& /*unit_test*/) override {}\n\nprivate:\n  int rank_;\n  int comm_size_;\n  MPI_Comm comm_;\n  TestPartResultCollection failed_results_;\n  int num_sucessfull_tests_;\n  int num_failed_tests_;\n  std::set<int> failed_ranks_;\n  std::vector<TestInfoProperties> failed_test_properties_;\n};\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestIterationStart(const ::testing::UnitTest& unit_test,\n                                                          int iteration) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  if (rank_ != 0) return;\n\n  if (GTEST_FLAG(repeat) != 1)\n    printf(\"\\nRepeating all tests (iteration %d) . . .\\n\\n\", iteration + 1);\n\n  const char* const filter = GTEST_FLAG(filter).c_str();\n\n  // Prints the filter if it's not *.  
This reminds the user that some\n  // tests may be skipped.\n  if (!String::CStringEquals(filter, kUniversalFilter)) {\n    ColoredPrintf(COLOR_YELLOW, \"Note: %s filter = %s\\n\", GTEST_NAME_, filter);\n  }\n\n  if (ShouldShard(kTestTotalShards, kTestShardIndex, false)) {\n    const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);\n    ColoredPrintf(COLOR_YELLOW, \"Note: This is test shard %d of %s.\\n\",\n                  static_cast<int>(shard_index) + 1, internal::posix::GetEnv(kTestTotalShards));\n  }\n\n  if (GTEST_FLAG(shuffle)) {\n    ColoredPrintf(COLOR_YELLOW, \"Note: Randomizing tests' orders with a seed of %d .\\n\",\n                  unit_test.random_seed());\n  }\n\n  ColoredPrintf(COLOR_GREEN, \"[==========] \");\n  printf(\"Running %s from %s.\\n\", FormatTestCount(unit_test.test_to_run_count()).c_str(),\n         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnEnvironmentsSetUpStart(\n    const ::testing::UnitTest& /*unit_test*/) {\n  if (rank_ != 0) return;\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"Global test environment set-up.\\n\");\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestCaseStart(const ::testing::TestCase& test_case) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  if (rank_ != 0) return;\n  const std::string counts = FormatCountableNoun(test_case.test_to_run_count(), \"test\", \"tests\");\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"%s from %s\", counts.c_str(), test_case.name());\n  if (test_case.type_param() == NULL) {\n    printf(\"\\n\");\n  } else {\n    printf(\", where %s = %s\\n\", kTypeParamLabel, test_case.type_param());\n  }\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestStart(const ::testing::TestInfo& test_info) {\n 
 if (rank_ != 0) return;\n  ColoredPrintf(COLOR_GREEN, \"[ RUN      ] \");\n  printf(\"%s.%s\", test_info.test_case_name(), test_info.name());\n\n  printf(\"\\n\");\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestPartResult(const ::testing::TestPartResult& result) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  // If the test part succeeded, we don't need to do anything.\n  if (result.type() == TestPartResult::kSuccess) return;\n  failed_results_.Add(result);\n}\n\n// Taken / modified from Googletest\nvoid PrintFailedTestResultCollection(const TestPartResultCollection& collection, int rank) {\n  for (std::size_t i = 0; i < collection.Size(); ++i) {\n    std::string m =\n        (::testing::Message() << \"Rank \" << rank << \": \"\n                              << ::testing::internal::FormatFileLocation(\n                                     collection.file_names.get_str(i), collection.line_numbers[i])\n                              << \" \"\n                              << TestPartResultTypeToString(\n                                     ::testing::TestPartResult::Type(collection.types[i]))\n                              << collection.messages.get_str(i))\n            .GetString();\n    printf(\"%s\\n\", m.c_str());\n    fflush(stdout);\n  }\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestEnd(const ::testing::TestInfo& test_info) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n\n  // check if any ranks failed\n  int failed_locally = failed_results_.Size() > 0;\n  std::vector<int> failed_flags_per_rank;\n  if (rank_ == 0) failed_flags_per_rank.resize(comm_size_);\n  MPI_Gather(&failed_locally, 1, MPI_INT, failed_flags_per_rank.data(), 1, MPI_INT, 0, comm_);\n\n  // failed non-root ranks Send to root and exit\n  if (rank_ != 0) {\n    if (failed_locally) {\n      failed_results_.Send(comm_, 0);\n    }\n    
failed_results_.Reset();\n    return;\n  }\n\n  int failed_globally = failed_locally;\n  for (const auto& f : failed_flags_per_rank) {\n    if (f) failed_globally = 1;\n  }\n\n  // print root failure fist\n  if (failed_results_.Size() > 0) {\n    PrintFailedTestResultCollection(failed_results_, rank_);\n    failed_ranks_.insert(0);\n  }\n\n  // receive and print from other failed ranks\n  for (int r = 1; r < comm_size_; ++r) {\n    if (failed_flags_per_rank[r]) {\n      failed_ranks_.insert(r);\n      failed_results_.Recv(comm_, r);\n      PrintFailedTestResultCollection(failed_results_, r);\n    }\n  }\n\n  // Reset result storage before next test\n  failed_results_.Reset();\n\n  if (!failed_globally) {\n    ColoredPrintf(COLOR_GREEN, \"[       OK ] \");\n    ++num_sucessfull_tests_;\n  } else {\n    ColoredPrintf(COLOR_RED, \"[  FAILED  ] \");\n    ++num_failed_tests_;\n    TestInfoProperties prop;\n    if (test_info.name()) prop.name = test_info.name();\n    if (test_info.test_case_name()) prop.case_name = test_info.test_case_name();\n    if (test_info.should_run()) prop.should_run = test_info.should_run();\n    if (test_info.type_param()) prop.type_param = test_info.type_param();\n    if (test_info.value_param()) prop.value_param = test_info.value_param();\n    for (int r = 0; r < comm_size_; ++r) {\n      if (failed_flags_per_rank[r]) {\n        prop.ranks.insert(r);\n      }\n    }\n    failed_test_properties_.emplace_back(std::move(prop));\n  }\n\n  printf(\"%s.%s\", test_info.test_case_name(), test_info.name());\n  if (failed_globally) PrintFullTestCommentIfPresent(test_info);\n\n  if (GTEST_FLAG(print_time)) {\n    printf(\" (%s ms)\\n\", internal::StreamableToString(test_info.result()->elapsed_time()).c_str());\n  } else {\n    printf(\"\\n\");\n  }\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestCaseEnd(const ::testing::TestCase& test_case) {\n  using namespace ::testing;\n  if 
(!GTEST_FLAG(print_time) || rank_ != 0) return;\n\n  const std::string counts = FormatCountableNoun(test_case.test_to_run_count(), \"test\", \"tests\");\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"%s from %s (%s ms total)\\n\\n\", counts.c_str(), test_case.name(),\n         internal::StreamableToString(test_case.elapsed_time()).c_str());\n  fflush(stdout);\n}\n\nstatic std::string FormatSet(const std::set<int>& s) {\n  std::string res;\n  for (const auto& val : s) {\n    res += std::to_string(val);\n    if (val != *(--s.end())) {\n      res += \", \";\n    }\n  }\n  // res.resize(res.size() - 2); // remove last comma\n  return res;\n}\n\n// Taken / modified from Googletest\nstatic void PrintFullTestCommentIfPresent(const std::string& type_param,\n                                          const std::string& value_param) {\n  if (!type_param.empty() || !value_param.empty()) {\n    printf(\", where \");\n    if (!type_param.empty()) {\n      printf(\"%s = %s\", kTypeParamLabel, type_param.c_str());\n      if (!value_param.empty()) printf(\" and \");\n    }\n    if (!value_param.empty()) {\n      printf(\"%s = %s\", kValueParamLabel, value_param.c_str());\n    }\n    printf(\",\");\n  }\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnTestIterationEnd(const ::testing::UnitTest& unit_test,\n                                                        int /*iteration*/) {\n  using namespace ::testing;\n  failed_results_.Reset();\n  if (rank_ != 0) {\n    return;\n  }\n\n  ColoredPrintf(COLOR_GREEN, \"[==========] \");\n  printf(\"%s from %s ran on %d ranks.\", FormatTestCount(unit_test.test_to_run_count()).c_str(),\n         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str(), comm_size_);\n  if (GTEST_FLAG(print_time)) {\n    printf(\" (%s ms total)\", internal::StreamableToString(unit_test.elapsed_time()).c_str());\n  }\n  printf(\"\\n\");\n  ColoredPrintf(COLOR_GREEN, \"[  PASSED  ] \");\n  printf(\"%s.\\n\", 
FormatTestCount(num_sucessfull_tests_).c_str());\n\n  if (num_failed_tests_) {\n    ColoredPrintf(COLOR_RED, \"[  FAILED  ] \");\n    printf(\"%s, listed below:\\n\", FormatTestCount(num_failed_tests_).c_str());\n    for (const auto& prop : failed_test_properties_) {\n      if (!prop.should_run) continue;\n      ColoredPrintf(COLOR_RED, \"[  FAILED  ] \");\n      printf(\"%s.%s\", prop.case_name.c_str(), prop.name.c_str());\n      PrintFullTestCommentIfPresent(prop.type_param, prop.value_param);\n      printf(\" on ranks [%s]\", FormatSet(prop.ranks).c_str());\n      printf(\"\\n\");\n    }\n  }\n\n  int num_disabled = unit_test.reportable_disabled_test_count();\n  if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {\n    if (!num_failed_tests_) {\n      printf(\"\\n\");  // Add a spacer if no FAILURE banner is displayed.\n    }\n    ColoredPrintf(COLOR_YELLOW, \"  YOU HAVE %d DISABLED %s\\n\\n\", num_disabled,\n                  num_disabled == 1 ? \"TEST\" : \"TESTS\");\n  }\n  // Ensure that Google Test output is printed before, e.g., heapchecker output.\n  fflush(stdout);\n}\n\n// Taken / modified from Googletest\nvoid PrettyMPIUnitTestResultPrinter::OnEnvironmentsTearDownStart(\n    const ::testing::UnitTest& /*unit_test*/) {\n  if (rank_ != 0) return;\n  ColoredPrintf(COLOR_GREEN, \"[----------] \");\n  printf(\"Global MPI test environment tear-down\\n\");\n  fflush(stdout);\n}\n\n}  // anonymous namespace\n}  // namespace gtest_mpi\n\n#endif\n\n"
  },
  {
    "path": "libs/gtest_mpi/include/gtest_mpi/gtest_mpi_internal.hpp",
    "content": "// This project contains source code from the Googletest framework\n// obtained from https://github.com/google/googletest with the following\n// terms:\n//\n// Copyright 2005, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// ---------------------------------------------------------------------\n//\n// Modifications and additions are published under the following terms:\n//\n// Copyright 2019, Simon Frasch\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of the copyright holder nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n// ---------------------------------------------------------------------\n\n#ifndef GUARD_GTEST_MPI_INTERNAL_HPP\n#define GUARD_GTEST_MPI_INTERNAL_HPP\n#include <gtest/gtest.h>\n#include <mpi.h>\n#include <unistd.h>\n#include <cstdarg>\n#include <string>\n\nnamespace gtest_mpi {\nnamespace { // no external linkage\n\n// Taken / modified from Googletest\nstatic const char kDisableTestFilter[] = \"DISABLED_*:*/DISABLED_*\";\nstatic const char kDeathTestCaseFilter[] = \"*DeathTest:*DeathTest/*\";\nstatic const char kUniversalFilter[] = \"*\";\nstatic const char kDefaultOutputFormat[] = \"xml\";\nstatic const char kDefaultOutputFile[] = \"test_detail\";\nstatic const char kTestShardIndex[] = \"GTEST_SHARD_INDEX\";\nstatic const char kTestTotalShards[] = \"GTEST_TOTAL_SHARDS\";\nstatic const char kTestShardStatusFile[] = \"GTEST_SHARD_STATUS_FILE\";\nstatic const char kTypeParamLabel[] = \"TypeParam\";\nstatic const char kValueParamLabel[] = \"GetParam()\";\n\n// Taken / modified from Googletest\nenum GTestColor { COLOR_DEFAULT, COLOR_RED, COLOR_GREEN, COLOR_YELLOW };\n\n// Taken / modified from Googletest\nstatic void PrintFullTestCommentIfPresent(const ::testing::TestInfo& test_info) {\n  const char* const type_param = test_info.type_param();\n  const char* const value_param = test_info.value_param();\n\n  if (type_param != NULL || value_param != NULL) {\n    printf(\", where \");\n    if (type_param != NULL) {\n      printf(\"%s = 
%s\", kTypeParamLabel, type_param);\n      if (value_param != NULL) printf(\" and \");\n    }\n    if (value_param != NULL) {\n      printf(\"%s = %s\", kValueParamLabel, value_param);\n    }\n  }\n}\n\n// Taken / modified from Googletest\nbool ShouldUseColor(bool stdout_is_tty) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  const char* const gtest_color = GTEST_FLAG(color).c_str();\n\n  if (String::CaseInsensitiveCStringEquals(gtest_color, \"auto\")) {\n#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW\n    // On Windows the TERM variable is usually not set, but the\n    // console there does support colors.\n    return stdout_is_tty;\n#else\n    // On non-Windows platforms, we rely on the TERM variable.\n    const char* const term = getenv(\"TERM\");\n    const bool term_supports_color =\n        String::CStringEquals(term, \"xterm\") || String::CStringEquals(term, \"xterm-color\") ||\n        String::CStringEquals(term, \"xterm-256color\") || String::CStringEquals(term, \"screen\") ||\n        String::CStringEquals(term, \"screen-256color\") || String::CStringEquals(term, \"tmux\") ||\n        String::CStringEquals(term, \"tmux-256color\") ||\n        String::CStringEquals(term, \"rxvt-unicode\") ||\n        String::CStringEquals(term, \"rxvt-unicode-256color\") ||\n        String::CStringEquals(term, \"linux\") || String::CStringEquals(term, \"cygwin\");\n    return stdout_is_tty && term_supports_color;\n#endif // GTEST_OS_WINDOWS\n  }\n\n  return String::CaseInsensitiveCStringEquals(gtest_color, \"yes\") ||\n         String::CaseInsensitiveCStringEquals(gtest_color, \"true\") ||\n         String::CaseInsensitiveCStringEquals(gtest_color, \"t\") ||\n         String::CStringEquals(gtest_color, \"1\");\n  // We take \"yes\", \"true\", \"t\", and \"1\" as meaning \"yes\".  
If the\n  // value is neither one of these nor \"auto\", we treat it as \"no\" to\n  // be conservative.\n}\n\n// Taken / modified from Googletest\nstatic const char* GetAnsiColorCode(GTestColor color) {\n  switch (color) {\n    case COLOR_RED:\n      return \"1\";\n    case COLOR_GREEN:\n      return \"2\";\n    case COLOR_YELLOW:\n      return \"3\";\n    default:\n      return NULL;\n  };\n}\n\n// Taken / modified from Googletest\nstatic void ColoredPrintf(GTestColor color, const char* fmt, ...) {\n  va_list args;\n  va_start(args, fmt);\n\n  static const bool in_color_mode = ShouldUseColor(isatty(fileno(stdout)) != 0);\n  const bool use_color = in_color_mode && (color != COLOR_DEFAULT);\n\n  if (!use_color) {\n    vprintf(fmt, args);\n    va_end(args);\n    return;\n  }\n\n  printf(\"\\033[0;3%sm\", GetAnsiColorCode(color));\n  vprintf(fmt, args);\n  printf(\"\\033[m\"); // Resets the terminal to default.\n  va_end(args);\n}\n\n// Taken / modified from Googletest\n::testing::internal::Int32 Int32FromEnvOrDie(const char* var,\n                                             ::testing::internal::Int32 default_val) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  const char* str_val = getenv(var);\n  if (str_val == NULL) {\n    return default_val;\n  }\n\n  Int32 result;\n  if (!ParseInt32(Message() << \"The value of environment variable \" << var, str_val, &result)) {\n    exit(EXIT_FAILURE);\n  }\n  return result;\n}\n\n// Taken / modified from Googletest\nstatic std::string FormatCountableNoun(int count, const char* singular_form,\n                                       const char* plural_form) {\n  using namespace ::testing;\n  return internal::StreamableToString(count) + \" \" + (count == 1 ? 
singular_form : plural_form);\n}\n\n// Taken / modified from Googletest\nstatic std::string FormatTestCount(int test_count) {\n  return FormatCountableNoun(test_count, \"test\", \"tests\");\n}\n\n// Taken / modified from Googletest\nstatic std::string FormatTestCaseCount(int test_case_count) {\n  return FormatCountableNoun(test_case_count, \"test case\", \"test cases\");\n}\n\n// Taken / modified from Googletest\nstatic const char* TestPartResultTypeToString(::testing::TestPartResult::Type type) {\n  switch (type) {\n    case ::testing::TestPartResult::kSuccess:\n      return \"Success\";\n\n    case ::testing::TestPartResult::kNonFatalFailure:\n    case ::testing::TestPartResult::kFatalFailure:\n#ifdef _MSC_VER\n      return \"error: \";\n#else\n      return \"Failure\\n\";\n#endif\n    default:\n      return \"Unknown result type\";\n  }\n}\n\n// Taken / modified from Googletest\nbool ShouldShard(const char* total_shards_env, const char* shard_index_env,\n                 bool in_subprocess_for_death_test) {\n  using namespace ::testing;\n  using namespace ::testing::internal;\n  if (in_subprocess_for_death_test) {\n    return false;\n  }\n\n  const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);\n  const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);\n\n  if (total_shards == -1 && shard_index == -1) {\n    return false;\n  } else if (total_shards == -1 && shard_index != -1) {\n    const Message msg = Message() << \"Invalid environment variables: you have \" << kTestShardIndex\n                                  << \" = \" << shard_index << \", but have left \" << kTestTotalShards\n                                  << \" unset.\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  } else if (total_shards != -1 && shard_index == -1) {\n    const Message msg = Message() << \"Invalid environment variables: you have \" << kTestTotalShards\n                                  << \" = \" 
<< total_shards << \", but have left \" << kTestShardIndex\n                                  << \" unset.\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  } else if (shard_index < 0 || shard_index >= total_shards) {\n    const Message msg =\n        Message() << \"Invalid environment variables: we require 0 <= \" << kTestShardIndex << \" < \"\n                  << kTestTotalShards << \", but you have \" << kTestShardIndex << \"=\" << shard_index\n                  << \", \" << kTestTotalShards << \"=\" << total_shards << \".\\n\";\n    ColoredPrintf(COLOR_RED, msg.GetString().c_str());\n    fflush(stdout);\n    exit(EXIT_FAILURE);\n  }\n\n  return total_shards > 1;\n}\n\n// info from TestInfo, which does not have a copy constructor\nstruct TestInfoProperties {\n  std::string name;\n  std::string case_name;\n  std::string type_param;\n  std::string value_param;\n  bool should_run;\n  std::set<int> ranks;\n};\n\n// Holds null terminated strings in a single vector,\n// which can be exchanged in a single MPI call\nclass StringCollection {\npublic:\n  void Add(const char* s) {\n    int size = 0;\n    for (; *s != '\\0'; ++s, ++size) {\n      text.push_back(*s);\n    }\n    text.push_back('\\0');\n    start_indices.push_back(prev_size);\n    prev_size = size + 1;\n  }\n\n  // Sends content to requested rank\n  void Send(MPI_Comm comm, int rank) const {\n    MPI_Send(text.data(), text.size(), MPI_CHAR, rank, 0, comm);\n    MPI_Send(start_indices.data(), start_indices.size(), MPI_INT, rank, 0, comm);\n  }\n\n  // Overrides content with data from requested rank\n  void Recv(MPI_Comm comm, int rank) {\n    MPI_Status status;\n    int count = 0;\n\n    // Recv text\n    MPI_Probe(rank, 0, comm, &status);\n    MPI_Get_count(&status, MPI_CHAR, &count);\n    text.resize(count);\n    MPI_Recv(text.data(), count, MPI_CHAR, rank, 0, comm, MPI_STATUS_IGNORE);\n\n    // Recv sizes\n    MPI_Probe(rank, 0, comm, &status);\n 
   MPI_Get_count(&status, MPI_INT, &count);\n    start_indices.resize(count);\n    MPI_Recv(start_indices.data(), count, MPI_INT, rank, 0, comm, MPI_STATUS_IGNORE);\n  }\n\n  void Reset() {\n    text.clear();\n    start_indices.clear();\n    prev_size = 0;\n  }\n\n  const char* get_str(const int id) const { return text.data() + start_indices[id]; }\n\n  const std::size_t Size() const { return start_indices.size(); }\n\nprivate:\n  int prev_size = 0;\n  std::vector<char> text;\n  std::vector<int> start_indices;\n};\n\n// All info recuired to print a failed test result.\n// Includes functionality for MPI exchange\nstruct TestPartResultCollection {\n  // Sends content to requested rank\n  void Send(MPI_Comm comm, int rank) {\n    MPI_Send(types.data(), types.size(), MPI_INT, rank, 0, comm);\n    MPI_Send(line_numbers.data(), line_numbers.size(), MPI_INT, rank, 0, comm);\n    summaries.Send(comm, rank);\n    messages.Send(comm, rank);\n    file_names.Send(comm, rank);\n  }\n\n  // Overrides content with data from requested rank\n  void Recv(MPI_Comm comm, int rank) {\n    MPI_Status status;\n    int count = 0;\n\n    // Recv text\n    MPI_Probe(rank, 0, comm, &status);\n    MPI_Get_count(&status, MPI_INT, &count);\n    types.resize(count);\n    MPI_Recv(types.data(), count, MPI_INT, rank, 0, comm, MPI_STATUS_IGNORE);\n\n    // Recv sizes\n    MPI_Probe(rank, 0, comm, &status);\n    MPI_Get_count(&status, MPI_INT, &count);\n    line_numbers.resize(count);\n    MPI_Recv(line_numbers.data(), count, MPI_INT, rank, 0, comm, MPI_STATUS_IGNORE);\n\n    summaries.Recv(comm, rank);\n    messages.Recv(comm, rank);\n    file_names.Recv(comm, rank);\n  }\n\n  void Add(const ::testing::TestPartResult& result) {\n    types.push_back(result.type());\n    line_numbers.push_back(result.line_number());\n    summaries.Add(result.summary());\n    messages.Add(result.message());\n    file_names.Add(result.file_name());\n  }\n\n  void Reset() {\n    types.clear();\n    
line_numbers.clear();\n    summaries.Reset();\n    messages.Reset();\n    file_names.Reset();\n  }\n\n  std::size_t Size() const { return types.size(); }\n\n  std::vector<int> types;\n  std::vector<int> line_numbers;\n  StringCollection summaries;\n  StringCollection messages;\n  StringCollection file_names;\n};\n\n} // anonymous namespace\n} // namespace gtest_mpi\n#endif\n\n"
  },
  {
    "path": "miniapp/CMakeLists.txt",
    "content": "################\n#  Build test  #\n################\nset(executables \"layout_miniapp\" \"cosma_miniapp\" \"cosma_statistics\")\n\nforeach(exec ${executables})\n    add_executable(${exec} \"${exec}.cpp\")\n    target_link_libraries(${exec} PRIVATE cosma cxxopts::cxxopts)\n    install(TARGETS ${exec} RUNTIME DESTINATION \"${CMAKE_INSTALL_PREFIX}/bin\")\nendforeach()\n\nif(NOT COSMA_SCALAPACK MATCHES \"OFF\")\n    set(scalapack_executables \"pxgemm_miniapp\")\n    foreach(exec ${scalapack_executables})\n        add_executable(${exec} \"${exec}.cpp\")\n        target_link_libraries(${exec} PRIVATE cosma_pxgemm_cpp cxxopts::cxxopts)\n        install(TARGETS ${exec} RUNTIME DESTINATION \"${CMAKE_INSTALL_PREFIX}/bin\")\n        if (COSMA_WITH_PROFILING)\n            target_link_libraries(${exec} PRIVATE semiprof::semiprof)\n            target_compile_definitions(${exec} PRIVATE COSMA_WITH_PROFILING)\n        endif()\n    endforeach()\nendif()\n"
  },
  {
    "path": "miniapp/cosma_miniapp.cpp",
    "content": "#include <cosma/multiply.hpp>\n\n#include <algorithm>\n#include <cctype>\n#include <chrono>\n#include <cstdlib>\n#include <fstream>\n#include <iostream>\n#include <limits>\n#include <sstream>\n#include <string>\n#include <vector>\n#include \"../utils/parse_strategy.hpp\"\n#include \"../utils/cosma_utils.hpp\"\n\n#include <cxxopts.hpp>\n\nusing namespace cosma;\n\ntemplate <typename T>\nvoid fill_int(T* ptr, size_t size) {\n    for (unsigned i = 0u; i < size; ++i) {\n        ptr[i] = 10*drand48();\n    }\n}\n\ntemplate <typename T>\nvoid output_matrix(CosmaMatrix<T> &M, int rank) {\n    std::string local = M.which_matrix() + std::to_string(rank) + \".txt\";\n    std::ofstream local_file(local);\n    local_file << M << std::endl;\n    local_file.close();\n}\n\ntemplate <typename T>\nbool run(const int m, const int n, const int k, \n         const std::vector<std::string>& steps, \n         long& timing, const bool test_correctness,\n         MPI_Comm comm = MPI_COMM_WORLD) {\n    int rank, size;\n    MPI_Comm_rank(comm, &rank);\n    MPI_Comm_size(comm, &size);\n\n    // specified by the environment variable COSMA_CPU_MAX_MEMORY\n    long long memory_limit = cosma::get_cpu_max_memory<T>();\n\n    if (!test_correctness) {\n        // specified by the env var COSMA_OVERLAP_COMM_AND_COMP\n        bool overlap_comm_and_comp = cosma::get_overlap_comm_and_comp();\n        const Strategy& strategy = parse_strategy(m, n, k, size,\n                                                  steps,\n                                                  memory_limit,\n                                                  overlap_comm_and_comp);\n\n        if (rank == 0) {\n            std::cout << \"Strategy = \" << strategy << std::endl;\n        }\n\n        // Declare A,B and C COSMA matrices objects\n        CosmaMatrix<T> A('A', strategy, rank);\n        CosmaMatrix<T> B('B', strategy, rank);\n        CosmaMatrix<T> C('C', strategy, rank);\n\n        T alpha{1};\n        T 
beta{0};\n\n        // fill the matrices with random data\n        srand48(rank);\n        fill_int(A.matrix_pointer(), A.matrix_size());\n        fill_int(B.matrix_pointer(), B.matrix_size());\n\n        MPI_Barrier(comm);\n        auto start = std::chrono::steady_clock::now();\n        multiply(A, B, C, strategy, comm, alpha, beta);\n        MPI_Barrier(comm);\n        auto end = std::chrono::steady_clock::now();\n\n        timing \n            = std::chrono::duration_cast<std::chrono::milliseconds>(end - start)\n            .count();\n\n        return true;\n    } else {\n        // specified by the env var COSMA_OVERLAP_COMM_AND_COMP\n        const Strategy& strategy_no_overlap = parse_strategy(m, n, k, size,\n                                                  steps,\n                                                  memory_limit,\n                                                  false);\n        const Strategy& strategy_with_overlap = parse_strategy(m, n, k, size,\n                                                  steps,\n                                                  memory_limit,\n                                                  true);\n        if (rank == 0) {\n            std::cout << \"Strategy = \" << strategy_no_overlap << std::endl;\n        }\n\n        auto ctx = cosma::make_context<T>();\n\n        // first run without overlapping communication and computation\n        bool isOK = test_cosma<T>(strategy_no_overlap, ctx, comm);\n        // then run with the overlap of communication and computation\n        isOK = isOK && test_cosma<T>(strategy_with_overlap, ctx, comm);\n\n        return rank == 0 ? 
isOK : true;\n    }\n}\n\nint main(int argc, char **argv) {\n    cxxopts::Options options(\"COSMA MINIAPP\", \n        \"A miniapp computing: `C=A*B, where dim(A)=m*k, dim(B)=k*n, dim(C)=m*n\");\n    options.add_options()\n        (\"m,m_dim\",\n            \"number of rows of A and C.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"n,n_dim\",\n            \"number of columns of B and C.\",\n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"k,k_dim\",\n            \"number of columns of A and rows of B.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"s,steps\", \n            \"Division steps that the algorithm should perform.\",\n            cxxopts::value<std::vector<std::string>>()->default_value(\"\"))\n        (\"r,n_rep\",\n            \"number of repetitions.\", \n            cxxopts::value<int>()->default_value(\"2\"))\n        (\"t,type\",\n            \"data type of matrix entries.\",\n            cxxopts::value<std::string>()->default_value(\"double\"))\n        (\"test\",\n            \"test the result correctness.\",\n            cxxopts::value<bool>()->default_value(\"false\"))\n        (\"h,help\", \"Print usage.\")\n        ;\n\n    auto result = options.parse(argc, argv);\n    if (result.count(\"help\")) {\n        std::cout << options.help() << std::endl;\n        return 0;\n    }\n\n    auto m = result[\"m_dim\"].as<int>();\n    auto n = result[\"n_dim\"].as<int>();\n    auto k = result[\"k_dim\"].as<int>();\n    auto steps = result[\"steps\"].as<std::vector<std::string>>();\n    auto n_rep = result[\"n_rep\"].as<int>();\n    auto type = result[\"type\"].as<std::string>();\n    // transform to lower-case\n    std::transform(type.begin(), type.end(), type.begin(), \n        [&](char c) {\n            return std::tolower(c);\n        }\n    );\n    // check if the type option takes a correct value\n    std::unordered_set<std::string> type_options = {\n        \"float\", 
\"double\", \"zfloat\", \"zdouble\"\n    };\n    if (type_options.find(type) == type_options.end()) {\n        std::cout << \"COSMA (cosma_miniapp.cpp): ERROR: --type option: can only take the following values: \" << std::endl;\n        for (const auto& el : type_options) {\n            std::cout << el << \", \";\n        }\n        std::cout << std::endl;\n        return 0;\n    }\n\n    bool test_correctness = result[\"test\"].as<bool>();\n    // some basic checks\n    if (test_correctness) {\n        // if testing correctness, n_rep = 1;\n        n_rep = 1;\n        std::cout << \"COSMA(cosma_miniapp.cpp): WARNING: correctness checking enabled, setting `n_rep` to 1.\" << std::endl;\n    }\n\n    MPI_Init(&argc, &argv);\n\n    int P, rank;\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    bool result_correct = true;\n\n    std::vector<long> times;\n    for (int i = 0; i < n_rep; ++i) {\n        long t_run = 0;\n        try {\n            if (type == \"double\") {\n                result_correct = \n                run<double>(m, n, k, steps, \n                            t_run, test_correctness, MPI_COMM_WORLD);\n            } else if (type == \"float\") {\n                result_correct = \n                run<float>(m, n, k, steps, \n                           t_run, test_correctness, MPI_COMM_WORLD);\n            } else if (type == \"zdouble\") {\n                result_correct = \n                run<std::complex<double>>(m, n, k, steps, \n                                          t_run, test_correctness, MPI_COMM_WORLD);\n            } else if (type == \"zfloat\") {\n                result_correct = \n                run<std::complex<float>>(m, n, k, steps, \n                                         t_run, test_correctness, MPI_COMM_WORLD);\n            } else {\n                throw std::runtime_error(\"COSMA(cosma_miniapp): unknown data type of matrix entries.\");\n            }\n        } catch (const 
std::exception& e) {\n            int flag = 0;\n            MPI_Finalized(&flag);\n            if (!flag) {\n                MPI_Abort(MPI_COMM_WORLD, -1);\n                MPI_Finalize();\n            }\n            return 0;\n        }\n        times.push_back(t_run);\n    }\n    std::sort(times.begin(), times.end());\n\n    // time is only measured if correctness checking was disabled\n    if (!test_correctness && rank == 0) {\n        std::cout << \"COSMA TIMES [ms] = \";\n        for (auto &time : times) {\n            std::cout << time << \" \";\n        }\n        std::cout << std::endl;\n    }\n\n    if (test_correctness && rank == 0) {\n        std::string yes_no = result_correct ? \"\" : \" NOT\";\n        std::cout << \"Result is\" << yes_no << \" CORRECT!\" << std::endl;\n    }\n\n    MPI_Finalize();\n\n    return 0;\n}\n"
  },
  {
    "path": "miniapp/cosma_statistics.cpp",
    "content": "/*\nSimulates the algorithm (without actually computing the matrix multiplication)\n * in order to get the total volume of the communication, the maximum volume of computation\n * done in a single branch and the maximum required buffer size that the algorithm requires.\n */\n#include \"../utils/parse_strategy.hpp\"\n#include <cosma/statistics.hpp>\n#include <cxxopts.hpp>\n\n#include <iostream>\n\nusing namespace cosma;\n\nint main( int argc, char **argv ) {\n    cxxopts::Options options(\"COSMA STATISTICS\",\n                             \"A miniapp computing communication volume \\\n                             and local multiplication sizes. dim(A)=m*k, dim(B)=k*n; dim(C)=m*n.\");\n    options.add_options()\n        (\"m,m_dim\",\n            \"number of rows of A and C.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"n,n_dim\",\n            \"number of columns of B and C.\",\n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"k,k_dim\",\n            \"number of columns of A and rows of B.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"P,n_proc\",\n            \"Number of MPI ranks.\", \n            cxxopts::value<int>()->default_value(\"1\"))\n        (\"s,steps\", \n            \"Division steps that the algorithm should perform.\",\n            cxxopts::value<std::vector<std::string>>()->default_value(\"\"))\n        (\"h,help\", \"Print usage.\")\n    ;\n\n    auto result = options.parse(argc, argv);\n\n    auto m = result[\"m_dim\"].as<int>();\n    auto n = result[\"n_dim\"].as<int>();\n    auto k = result[\"k_dim\"].as<int>();\n    auto P = result[\"n_proc\"].as<int>();\n    auto steps = result[\"steps\"].as<std::vector<std::string>>();\n\n    bool overlap_comm_and_comp = cosma::get_overlap_comm_and_comp();\n    long long memory_limit = cosma::get_cpu_max_memory<double>();\n\n    const Strategy& strategy = parse_strategy(m, n, k, P,\n                               
               steps,\n                                              memory_limit,\n                                              overlap_comm_and_comp);\n\n    std::cout << \"Strategy: \\n\" << strategy << std::endl;\n\n    int n_rep = 1;\n    multiply(strategy, n_rep);\n\n    return 0;\n}\n"
  },
  {
    "path": "miniapp/layout_miniapp.cpp",
    "content": "#include <cosma/multiply.hpp>\n\n#include <algorithm>\n#include <cctype>\n#include <chrono>\n#include <cstdlib>\n#include <fstream>\n#include <iostream>\n#include <limits>\n#include <sstream>\n#include <string>\n#include <stdlib.h>\n#include <vector>\n\n\n#include \"../utils/parse_strategy.hpp\"\n#include \"../utils/cosma_utils.hpp\"\n\n#include <cxxopts.hpp>\n\nusing namespace cosma;\n\nint main(int argc, char **argv) {\n    cxxopts::Options options(\"NATIVE COSMA LAYOUT MINIAPP\", \n        \"A miniapp showing the native COSMA data layout for computing C=A*B, where dim(A)=m*k, dim(B)=k*n, dim(C)=m*n\");\n    options.add_options()\n        (\"m,m_dim\",\n            \"number of rows of A and C.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"n,n_dim\",\n            \"number of columns of B and C.\",\n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"k,k_dim\",\n            \"number of columns of A and rows of B.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"P,n_ranks\",\n            \"number of MPI ranks.\", \n            cxxopts::value<int>()->default_value(\"1\"))\n        (\"s,steps\", \n            \"Division steps that the algorithm should perform. 
Can be empty.\",\n            cxxopts::value<std::vector<std::string>>()->default_value(\"\"))\n        (\"h,help\", \"Print usage.\")\n        ;\n\n    auto result = options.parse(argc, argv);\n    if (result.count(\"help\")) {\n        std::cout << options.help() << std::endl;\n        return 0;\n    }\n\n    auto m = result[\"m_dim\"].as<int>();\n    auto n = result[\"n_dim\"].as<int>();\n    auto k = result[\"k_dim\"].as<int>();\n    auto P = result[\"n_ranks\"].as<int>();\n    auto steps = result[\"steps\"].as<std::vector<std::string>>();\n\n    // prevent the optimization that might reduce the number of ranks\n    std::string set_min_local_size = \"COSMA_MIN_LOCAL_DIMENSION=1\";\n    putenv(&set_min_local_size[0]);\n\n    // specified by the environment variable COSMA_CPU_MAX_MEMORY\n    long long memory_limit = cosma::get_cpu_max_memory<double>();\n\n    // specified by the env var COSMA_OVERLAP_COMM_AND_COMP\n    bool overlap_comm_and_comp = cosma::get_overlap_comm_and_comp();\n    const Strategy& strategy = parse_strategy(m, n, k, P,\n                                              steps,\n                                              memory_limit,\n                                              overlap_comm_and_comp);\n\n    std::cout << \"Strategy = \" << strategy << std::endl;\n\n    int rank = 0;\n\n    // Declare A,B and C COSMA matrices objects\n    CosmaMatrix<double> A('A', strategy, rank);\n    CosmaMatrix<double> B('B', strategy, rank);\n    CosmaMatrix<double> C('C', strategy, rank);\n\n    auto A_layout = A.get_grid_layout();\n    auto B_layout = B.get_grid_layout();\n    auto C_layout = C.get_grid_layout();\n\n    std::cout << \"A matrix layout =\\n\" << A_layout.grid << std::endl;\n    std::cout << \"B matrix layout =\\n\" << B_layout.grid << std::endl;\n    std::cout << \"C matrix layout =\\n\" << C_layout.grid << std::endl;\n\n    /*\n    if (std::max(std::max(m, n), k) < 20) {\n        std::cout << \"\\n===============================\\n\\n\" 
<< std::endl;\n        std::cout << \"Visually, the matrices is distributed among ranks as follows:\\n\\n\";\n        std::cout << \"Matrix A:\\n\";\n        for (unsigned bi = 0; bi < A_layout.grid.num_blocks_row(); ++bi) {\n            for (unsigned i = A_layout.grid.rows_interval(bi).start; \n                          i < A_layout.grid.rows_interval(bi).end; \n                          ++i) {\n                for (unsigned bj = 0; bj < A_layout.grid.num_blocks_col(); ++bj) {\n                    auto owner = A_layout.grid.owner(bi, bj);\n\n                    for (unsigned j = A_layout.grid.cols_interval(bj).start; \n                                  j < A_layout.grid.cols_interval(bj).end; \n                                  ++j) {\n                        std::cout << owner << \"\\t\";\n                    }\n                }\n                std::cout << \"\\n\";\n            }\n        }\n    }\n    */\n\n    return 0;\n}\n"
  },
  {
    "path": "miniapp/pxgemm_miniapp.cpp",
    "content": "// from std\n#include \"../utils/pxgemm_utils.hpp\"\n#include <cxxopts.hpp>\n#include <unordered_set> \n\nusing namespace cosma;\n\nint main(int argc, char **argv) {\n    // **************************************\n    //   setup command-line parser\n    // **************************************\n    cxxopts::Options options(\"COSMA PXGEMM MINIAPP\", \n        \"A miniapp computing: `C = alpha*A*B + beta*C` and comparing the performance of COSMA (with scalapack wrappers) VS SCALAPACK.\");\n\n    // **************************************\n    //   readout the command line arguments\n    // **************************************\n    // matrix dimensions\n    // dim(A) = m*k, dim(B) = k*n, dim(C) = m*n\n    options.add_options()\n        (\"m,m_dim\",\n            \"number of rows of A and C.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"n,n_dim\",\n            \"number of columns of B and C.\",\n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"k,k_dim\",\n            \"number of columns of A and rows of B.\", \n            cxxopts::value<int>()->default_value(\"1000\"))\n        (\"block_a\",\n            \"block dimensions for matrix A.\",\n             cxxopts::value<std::vector<int>>()->default_value(\"128,128\"))\n        (\"block_b\",\n            \"block dimensions for matrix B.\",\n             cxxopts::value<std::vector<int>>()->default_value(\"128,128\"))\n        (\"block_c\",\n            \"block dimensions for matrix C.\",\n             cxxopts::value<std::vector<int>>()->default_value(\"128,128\"))\n        (\"p,p_grid\",\n            \"processor 2D-decomposition.\",\n             cxxopts::value<std::vector<int>>()->default_value(\"1,1\"))\n        (\"transpose\",\n            \"transpose/Conjugate flags for A and B.\",\n             cxxopts::value<std::string>()->default_value(\"NN\"))\n        (\"alpha\",\n            \"alpha parameter in C = alpha*A*B + beta*C.\",\n            
cxxopts::value<int>()->default_value(\"1\"))\n        (\"beta\",\n            \"beta parameter in C = alpha*A*B + beta*C.\",\n            cxxopts::value<int>()->default_value(\"0\"))\n        (\"r,n_rep\",\n            \"number of repetitions\",\n            cxxopts::value<int>()->default_value(\"2\"))\n        (\"t,type\",\n            \"data type of matrix entries.\",\n            cxxopts::value<std::string>()->default_value(\"double\"))\n        (\"test\",\n            \"test the result correctness.\",\n            cxxopts::value<bool>()->default_value(\"false\"))\n        (\"algorithm\", \n            \"defines which algorithm (cosma, scalapack or both) to run\",\n            cxxopts::value<std::string>()->default_value(\"both\"))\n        (\"h,help\", \"Print usage.\")\n    ;\n\n    auto result = options.parse(argc, argv);\n    if (result.count(\"help\")) {\n        std::cout << options.help() << std::endl;\n        return 0;\n    }\n\n    auto m = result[\"m_dim\"].as<int>();\n    auto n = result[\"n_dim\"].as<int>();\n    auto k = result[\"k_dim\"].as<int>();\n\n    auto block_a = result[\"block_a\"].as<std::vector<int>>();\n    auto block_b = result[\"block_b\"].as<std::vector<int>>();\n    auto block_c = result[\"block_c\"].as<std::vector<int>>();\n\n    auto p_grid = result[\"p_grid\"].as<std::vector<int>>();\n\n    auto transpose = result[\"transpose\"].as<std::string>();\n    // transform to upper-case\n    std::transform(transpose.begin(), transpose.end(), transpose.begin(), \n        [&](char c) {\n            return std::toupper(c);\n        }\n    );\n    std::unordered_set<std::string> transpose_options = {\n        \"NN\", \"TT\", \"NT\", \"TN\"\n    };\n    // check if transpose takes a correct value\n    if (std::find(transpose_options.begin(), transpose_options.end(), transpose) == transpose_options.end()) {\n        std::cout << \"COSMA (pxgemm_miniapp.cpp): ERROR: --transpose option \\\n        can only take the following values: \" << 
std::endl;\n        for (const auto& el : transpose_options) {\n            std::cout << el << \", \";\n        }\n        std::cout << std::endl;\n        return 0;\n    }\n\n    auto al = result[\"alpha\"].as<int>();\n    auto be = result[\"beta\"].as<int>();\n    // check if alpha and beta take correct values\n    if ((al != 0 && al != 1) || (be != 0 && be != 1)) {\n        std::cout << \"COSMA (pxgemm_miniapp.cpp): ERROR: in this miniapp, \\\n        --alpha and --beta options can only take values 0 or 1 corresponding to \\\n        the zero and the unit elements (respectively), of the chosen data type. \\\n        This is not a requirement of COSMA pxgemm wrapper, but just of this miniapp. \\\n        These elements are chosen because they are well defined also for complex data-types.\" \n        << std::endl;\n        return 0;\n    }\n\n    bool test_correctness = result[\"test\"].as<bool>();\n\n    auto n_rep = result[\"n_rep\"].as<int>();\n\n    auto type = result[\"type\"].as<std::string>();\n    // transform to lower-case\n    std::transform(type.begin(), type.end(), type.begin(), \n        [&](char c) {\n            return std::tolower(c);\n        }\n    );\n    // check if the type option takes a correct value\n    std::unordered_set<std::string> type_options = {\n        \"float\", \"double\", \"zfloat\", \"zdouble\"\n    };\n    if (type_options.find(type) == type_options.end()) {\n        std::cout << \"COSMA (pxgemm_miniapp.cpp): ERROR: --type option: can only take the following values: \" << std::endl;\n        for (const auto& el : type_options) {\n            std::cout << el << \", \";\n        }\n        std::cout << std::endl;\n        return 0;\n    }\n\n    char ta = transpose[0];\n    char tb = transpose[1];\n\n    // make lower-space\n    auto algorithm = result[\"algorithm\"].as<std::string>();\n    std::transform(algorithm.begin(), algorithm.end(), algorithm.begin(), \n        [&](char c) {\n            return std::tolower(c);\n        
}\n    );\n\n    // check if the algorithm option takes a correct value\n    std::unordered_set<std::string> algorithm_options = {\n        \"cosma\", \"scalapack\", \"both\"\n    };\n    if (algorithm_options.find(algorithm) == algorithm_options.end()) {\n        std::cout << \"COSMA (pxgemm_miniapp.cpp): ERROR: --algorithm option: can only take the following values: \" << std::endl;\n        for (const auto& el : algorithm_options) {\n            std::cout << el << \", \";\n        }\n        std::cout << std::endl;\n        return 0;\n    }\n\n    // some basic checks\n    if (test_correctness) {\n        // if testing correctness, n_rep = 1;\n        n_rep = 1;\n        std::cout << \"COSMA(pxgemm_miniapp.cpp): WARNING: correctness checking enabled, setting `n_rep` to 1.\" << std::endl;\n        if (algorithm != \"both\") {\n            std::cout << \"COSMA(pxgemm_miniapp.cpp): WARNING: correctness checking enabled, setting `algorithm` to `both`.\" << std::endl;\n            algorithm = \"both\";\n        }\n    }\n\n    std::vector<long> cosma_times;\n    std::vector<long> scalapack_times;\n\n    bool result_correct = true;\n\n    // initilize MPI\n    MPI_Init(&argc, &argv);\n\n    int rank, P;\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n    MPI_Comm_size(MPI_COMM_WORLD, &P);\n\n    // check if processor grid corresponds to P\n    if (p_grid[0] * p_grid[1] != P) {\n        p_grid[0] = 1;\n        p_grid[1] = P;\n        if (rank == 0) {\n            std::cout << \"COSMA(pxgemm_miniapp.cpp): warning: number of processors in the grid must be equal to P, setting grid to 1xP instead.\" << std::endl;\n        }\n    }\n\n    // *******************************\n    //   perform the multiplication\n    // ******************************\n    // no blacs functions will be invoked afterwards\n    bool exit_blacs = true;\n    try {\n        if (type == \"double\") {\n            // create the context here, so that\n            // it doesn't have to be created later\n    
        // (this is not necessary)\n            auto ctx = cosma::get_context_instance<double>();\n            if (rank == 0) {\n                ctx->turn_on_output();\n            }\n\n            double alpha = double{1.0 * al};\n            double beta = double{1.0 * be};\n            pxgemm_params<double> params(m, n, k, \n                                         block_a[0], block_a[1],\n                                         block_b[0], block_b[1],\n                                         block_c[0], block_c[1],\n                                         p_grid[0], p_grid[1],\n                                         ta, tb,\n                                         alpha, beta);\n\n            // **************************************\n            //    output the problem description\n            // **************************************\n            if (rank == 0) {\n                std::cout << \"Running PDGEMM on the following problem:\" << std::endl;\n                std::cout << params << std::endl;\n            }\n\n            result_correct = benchmark_pxgemm<double>(params, MPI_COMM_WORLD, n_rep,\n                                    algorithm,\n                                    cosma_times, scalapack_times, \n                                    test_correctness, exit_blacs);\n        } else if (type == \"float\") {\n            // create the context here, so that\n            // it doesn't have to be created later\n            // (this is not necessary)\n            auto ctx = cosma::get_context_instance<float>();\n            if (rank == 0) {\n                ctx->turn_on_output();\n            }\n\n            float alpha = float{1.0f * al};\n            float beta = float{1.0f * be};\n            pxgemm_params<float> params(m, n, k, \n                                         block_a[0], block_a[1],\n                                         block_b[0], block_b[1],\n                                         block_c[0], block_c[1],\n               
                          p_grid[0], p_grid[1],\n                                         ta, tb,\n                                         alpha, beta);\n\n            // **************************************\n            //    output the problem description\n            // **************************************\n            if (rank == 0) {\n                std::cout << \"Running PSGEMM on the following problem:\" << std::endl;\n                std::cout << params << std::endl;\n            }\n\n            result_correct = benchmark_pxgemm<float>(params, MPI_COMM_WORLD, n_rep,\n                                    algorithm,\n                                    cosma_times, scalapack_times,\n                                    test_correctness, exit_blacs);\n\n        } else if (type == \"zfloat\") {\n            // create the context here, so that\n            // it doesn't have to be created later\n            // (this is not necessary)\n            auto ctx = cosma::get_context_instance<std::complex<float>>();\n            if (rank == 0) {\n                ctx->turn_on_output();\n            }\n\n            std::complex<float> alpha = std::complex<float>{1.0f * al};\n            std::complex<float> beta = std::complex<float>{1.0f * be};\n            pxgemm_params<std::complex<float>> params(m, n, k, \n                                         block_a[0], block_a[1],\n                                         block_b[0], block_b[1],\n                                         block_c[0], block_c[1],\n                                         p_grid[0], p_grid[1],\n                                         ta, tb,\n                                         alpha, beta);\n\n            // **************************************\n            //    output the problem description\n            // **************************************\n            if (rank == 0) {\n                std::cout << \"Running PCGEMM on the following problem:\" << std::endl;\n                
std::cout << params << std::endl;\n            }\n\n            result_correct = benchmark_pxgemm<std::complex<float>>(params, MPI_COMM_WORLD, n_rep,\n                                    algorithm,\n                                    cosma_times, scalapack_times,\n                                    test_correctness, exit_blacs);\n        } else if (type == \"zdouble\") {\n            // create the context here, so that\n            // it doesn't have to be created later\n            // (this is not necessary)\n            auto ctx = cosma::get_context_instance<std::complex<double>>();\n            if (rank == 0) {\n                ctx->turn_on_output();\n            }\n\n            std::complex<double> alpha = std::complex<double>{1.0 * al};\n            std::complex<double> beta = std::complex<double>{1.0 * be};\n            pxgemm_params<std::complex<double>> params(m, n, k, \n                                         block_a[0], block_a[1],\n                                         block_b[0], block_b[1],\n                                         block_c[0], block_c[1],\n                                         p_grid[0], p_grid[1],\n                                         ta, tb,\n                                         alpha, beta);\n\n            // **************************************\n            //    output the problem description\n            // **************************************\n            if (rank == 0) {\n                std::cout << \"Running PZGEMM on the following problem:\" << std::endl;\n                std::cout << params << std::endl;\n            }\n\n            result_correct = benchmark_pxgemm<std::complex<double>>(params, MPI_COMM_WORLD, n_rep,\n                                    algorithm,\n                                    cosma_times, scalapack_times,\n                                    test_correctness, exit_blacs);\n        } else {\n            throw std::runtime_error(\"COSMA(pxgemm_miniapp): unknown data type of 
matrix entries.\");\n        }\n    } catch (const std::exception& e) {\n        // MPI is already finalized, but just in case\n        std::cout << e.what() << std::endl;\n        int flag = 0;\n        MPI_Finalized(&flag);\n        if (!flag) {\n            MPI_Abort(MPI_COMM_WORLD, -1);\n            MPI_Finalize();\n        }\n        return 0;\n    }\n\n    // *****************\n    //   output times\n    // *****************\n    if (rank == 0) {\n        if (algorithm == \"both\" || algorithm == \"cosma\") {\n            std::cout << \"COSMA TIMES [ms] = \";\n            for (auto &time : cosma_times) {\n                std::cout << time << \" \";\n            }\n            std::cout << std::endl;\n        }\n\n        if (algorithm == \"both\" || algorithm == \"scalapack\") {\n            std::cout << \"SCALAPACK TIMES [ms] = \";\n            for (auto &time : scalapack_times) {\n                std::cout << time << \" \";\n            }\n            std::cout << std::endl;\n        }\n    }\n\n    if (test_correctness) {\n        int result = result_correct ? 0 : 1;\n        int global_result = 0;\n        MPI_Reduce(&result, &global_result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);\n        if (rank == 0) {\n            std::string yes_no = global_result == 0 ? \"\" : \" NOT\";\n            std::cout << \"Result is\" << yes_no << \" CORRECT!\" << std::endl;\n        }\n    }\n\n    MPI_Finalize();\n\n    return 0;\n}\n"
  },
  {
    "path": "scripts/build.sh",
    "content": "#!/bin/bash\n\n# !!! --------------------------------- !!!\n# !!! Adjust the script to your system. !!!\n# !!! --------------------------------- !!!\n\n# Clean the build directory.\n#\n#rm -rf CMakeCache.txt CMakeFiles\n\n# If MKL is used\n#\n#export MKLROOT=<FIXME>\n\n# If GPU back end is used (Tiled-MM), set the following path:\n#\n#export CUDA_PATH=<FIXME> \n\n# Options\n# ^^^^^^^\n# \n# `CMAKE_BUILD_TYPE` := Debug|Release|Profile (default: Release)\n#\n# `COSMA_WITH_TESTS`: = ON|OFF (default: ON if COSMA is not a subproject)\n#    Enables tests.\n#\n# `COSMA_WITH_APPS`: = ON|OFF (default: ON if COSMA is not a subproject)\n#    Enables miniapps.\n#\n# `COSMA_WITH_BENCHMARKS`: = ON|OFF (default: ON if COSMA is not a subproject)\n#    Enables benchmarks.\n#\n# `COSMA_WITH_PROFILING`: = ON|OFF (default: OFF)\n#    Enables profiling of COSMA with `semiprof`.\n#\n# BLAS (select one of:)\n#\n# `COSMA_BLAS` := MKL|OPENBLAS|CRAY_LIBSCI|CUSTOM|CUDA|ROCM (default: MKL)\n#\n# `COSMA_SCALAPACK` := OFF|MKL|CRAY_LIBSCI|CUSTOM (default: OFF)\n#\n#    Note: Mixing OpenMP runtimes results in performance issues. If you use \n#          COSMA within a large application, make sure that a single OpenMP\n#          back end is used. If using GCC, that should be GNU OpenMP, except\n#          on Mac. COSMA automically selects the right OpenMP runtime back end \n#          based on platform and compiler.\n#\ncmake <FIXME:cosma_source_dir> \\\n  -D CMAKE_INSTALL_PREFIX=<FIXME:cosma_install_dir> \\\n\n"
  },
  {
    "path": "scripts/daint-mc_env.sh",
    "content": "module switch PrgEnv-cray PrgEnv-gnu\nmodule load daint-mc\nmodule load CMake\nmodule load intel # defines $MKLROOT\n\n# enable the dynamic linking and\n# the asynchronous thread progressing\n# MPICH (on Cray systems)\nexport CRAYPE_LINK_TYPE=dynamic\nexport MPICH_NEMESIS_ASYNC_PROGRESS=MC\nexport MPICH_MAX_THREAD_SAFETY=multiple\nexport MPICH_GNI_ASYNC_PROGRESS_TIMEOUT=0\n\n# setup the right compilers\nexport CC=`which cc`\nexport CXX=`which CC`\n"
  },
  {
    "path": "scripts/install_dependencies.py",
    "content": "#!/usr/bin/env python3 \nimport argparse\nimport os\nimport sys\nimport tempfile\nimport subprocess\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n        'prefix', \n        type=str,\n        help='Installation prefix for dependencies'\n        )\nargs = parser.parse_args()\nif not os.path.isdir(args.prefix):\n    print(\"The argument is not a directory.\")\n    sys.exit()\n\n\ndef install_lib(tmppath, prefix, libname):\n    url = 'https://github.com/kabicm/{libname}.git'.format(**locals())\n    clone_dir = os.path.join(tmppath, libname)\n    build_dir = os.path.join(tmppath, 'build_{libname}'.format(**locals()))\n    install_dir ='{prefix}/{libname}-master'.format(**locals())\n\n    config_cmd = ('cmake ../{libname} '\n                    '-DCMAKE_BUILD_TYPE=Release '\n                    '-DCMAKE_INSTALL_PREFIX={install_dir}'.format(**locals())\n                 )\n    build_and_install_cmd = 'cmake --build . --target install'\n    os.system('git clone --recursive {url} {clone_dir}'.format(**locals()))\n    os.makedirs(build_dir, exist_ok=True)\n    subprocess.call(config_cmd, cwd=build_dir, shell=True)\n    subprocess.call(build_and_install_cmd, cwd=build_dir, shell=True)\n\n    return install_dir\n\nwith tempfile.TemporaryDirectory() as tmppath:\n    install_dirs = ''\n    for libname in ['options', 'semiprof', 'grid2grid']:\n        install_dirs += '{};'.format(install_lib(tmppath, args.prefix, libname)) \n\n    print('\\nUse the following CMake parameter: -DCMAKE_PREFIX_PATH=\"{}\"'.format(install_dirs))\n"
  },
  {
    "path": "scripts/piz_daint_cpu.sh",
    "content": "# load the necessary modules\nmodule load daint-mc\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule load CMake\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\n\n# Enable threading\n# \nexport OMP_NUM_THREADS=18\nexport MKL_NUM_THREADS=18\n"
  },
  {
    "path": "scripts/piz_daint_gpu.sh",
    "content": "# load the necessary modules\nmodule load daint-gpu\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule swap gcc/11.2.0 gcc/9.3.0\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\nmodule load cudatoolkit\nmodule load CMake\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\n\nexport NCCL_ROOT=/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/comm_libs/nccl\nexport NCCL_IB_DISABLE=1\n# export NCCL_ROOT=/scratch/snx3000/kabicm/nccl/build\n# export NCCL_PKG_CONFIG=/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/comm_libs/nccl/lib/pkgconfig/\n# export PKG_CONFIG_PATH=${NCCL_PKG_CONFIG}:${PKG_CONFIG_PATH}\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\nexport CRAY_CUDA_MPS=1\nexport MPICH_RDMA_ENABLED_CUDA=1\n\n# Enable threading\n# \nexport OMP_NUM_THREADS=12\nexport MKL_NUM_THREADS=12\n"
  },
  {
    "path": "scripts/piz_daint_gpu_aware_mpi.sh",
    "content": "# load the necessary modules\nmodule load daint-gpu\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule swap gcc/11.2.0 gcc/9.3.0\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\nmodule load cudatoolkit\nmodule load CMake\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\n\nexport MPICH_RDMA_ENABLED_CUDA=1\nexport MPICH_GPU_SUPPORT_ENABLED=1\nexport MPICH_NO_GPU_DIRECT=1\n# export NCCL_ROOT=/scratch/snx3000/kabicm/nccl/build\n# export NCCL_PKG_CONFIG=/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/comm_libs/nccl/lib/pkgconfig/\n# export PKG_CONFIG_PATH=${NCCL_PKG_CONFIG}:${PKG_CONFIG_PATH}\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\nexport CRAY_CUDA_MPS=1\n\n# Enable threading\n# \nexport OMP_NUM_THREADS=12\nexport MKL_NUM_THREADS=12\n"
  },
  {
    "path": "scripts/run_gpu.sh",
    "content": "#!/bin/bash -l\n#SBATCH --job-name=matmul\n#SBATCH --time=00:03:00\n#SBATCH --nodes=4\n#SBATCH --constraint=gpu\n#set -x\n\nmodule load daint-gpu\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule unload cray-libsci\nmodule load intel\nmodule load CMake\nmodule load cudatoolkit\nexport CC=`which cc`\nexport CXX=`which CC`\nexport CRAYPE_LINK_TYPE=dynamic\nexport CRAY_CUDA_MPS=1\n\nn_iter=1 srun -u -N 4 -n 48 ./build/miniapp/cosma-miniapp -m 25000 -n 25000 -k 25000 -P 48\n\n"
  },
  {
    "path": "scripts/schedule_miniapp_on_daint_cpu.sh",
    "content": "#!/bin/bash -l\n#SBATCH --job-name=cosma_miniapp\n#SBATCH --constraint=mc\n#SBATCH --nodes=10\n#SBATCH --ntasks-per-node=2\n#SBATCH --time=2\n#SBATCH --output=cosma_miniapp.out\n#SBATCH --error=cosma_miniapp.err\n\nmodule load daint-mc\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule load CMake\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\n\n# Enable threading\n# \nexport OMP_NUM_THREADS=18\nexport MKL_NUM_THREADS=18\n\n# Move to `build` directory if not there already\n#\n# SCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null && pwd )\"\n# COSMA_DIR=\"$( dirname $SCRIPT_DIR )\"\nCOSMA_DIR=$SCRATCH/cosma-master\nMINIAPP_PATH=${COSMA_DIR}/build/miniapp\n\n# Run tests\n#\necho \"=====================\"\necho \"   Square Matrices\"\necho \"=====================\"\necho \"(m, n, k) = (10000, 10000, 10000)\"\necho \"Nodes: 10\"\necho \"MPI processes per rank: 2\"\necho \"\"\nsrun ${MINIAPP_PATH}/scalars_miniapp -m 10000 -n 10000 -k 10000 -P 20\n\necho \"\"\necho \"=====================\"\necho \"    Tall Matrices\"\necho \"=====================\"\necho \"(m, n, k) = (1000, 1000, 1000000)\"\necho \"Nodes: 10\"\necho \"MPI processes per rank: 2\"\necho \"\"\nsrun ${MINIAPP_PATH}/scalars_miniapp -m 1000 -n 1000 -k 1000000 -P 20\n\n\n"
  },
  {
    "path": "scripts/schedule_miniapp_on_daint_gpu.sh",
    "content": "#!/bin/bash -l\n#SBATCH --job-name=cosma_miniapp_gpu\n#SBATCH --constraint=gpu\n#SBATCH --nodes=10\n#SBATCH --ntasks-per-node=2\n#SBATCH --time=2\n#SBATCH --output=cosma_miniapp_gpu.out\n#SBATCH --error=cosma_miniapp_gpu.err\n\nmodule load daint-gpu\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule load CMake\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\nmodule load cudatoolkit # cublas\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\nexport CRAY_CUDA_MPS=1 # enables multiple ranks sharing the same GPU\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\n\n# Enable threading\n# \nexport OMP_NUM_THREADS=12\nexport MKL_NUM_THREADS=12\n\n# Move to `build` directory if not there already\n#\n# SCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null && pwd )\"\n# COSMA_DIR=\"$( dirname $SCRIPT_DIR )\"\nCOSMA_DIR=$SCRATCH/COSMA\nMINIAPP_PATH=${COSMA_DIR}/build/miniapp\n\n# Run tests\n#\necho \"=====================\"\necho \"   Square Matrices\"\necho \"=====================\"\necho \"(m, n, k) = (10000, 10000, 10000)\"\necho \"Nodes: 10\"\necho \"MPI processes per rank: 2\"\necho \"\"\nsrun ${MINIAPP_PATH}/scalars_miniapp -m 10000 -n 10000 -k 10000 -P 20\n\necho \"\"\necho \"=====================\"\necho \"    Tall Matrices\"\necho \"=====================\"\necho \"(m, n, k) = (1000, 1000, 1000000)\"\necho \"Nodes: 10\"\necho \"MPI processes per rank: 2\"\necho \"\"\nsrun ${MINIAPP_PATH}/scalars_miniapp -m 1000 -n 1000 -k 1000000 -P 20\n"
  },
  {
    "path": "scripts/schedule_tests_on_daint.sh",
    "content": "#!/bin/bash -l\n#SBATCH --job-name=cosma_tests\n#SBATCH --constraint=mc\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=16\n#SBATCH --time=2\n#SBATCH --output=cosma_tests.out\n#SBATCH --error=cosma_tests.err\n\nmodule load daint-mc\nmodule swap PrgEnv-cray PrgEnv-gnu\nmodule load CMake\nmodule unload cray-libsci\nmodule load intel # defines $MKLROOT\n\n# Setup the compiler\n#\nexport CC=`which cc`\nexport CXX=`which CC`\n\n# Enable dynamic linking\n#\nexport CRAYPE_LINK_TYPE=dynamic\n\n# Move to `build` directory if not there already\n#\n# SCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null && pwd )\"\n# COSMA_DIR=\"$( dirname $SCRIPT_DIR )\"\nCOSMA_DIR=$SCRATCH/COSMA\nmkdir -p ${COSMA_DIR}/build\ncd ${COSMA_DIR}/build\n\n# Build tests if not already built\n#\nmake tests\n\n# Run tests\n#\nsrun -n 1 tests/test.mapper\nsrun -n 4 tests/test.multiply_using_layout\nsrun -n 8 tests/test.scalar_matmul\nsrun -n 16 tests/test.pdgemm\nsrun -n 16 tests/test.multiply\n"
  },
  {
    "path": "spack/packages/costa/package.py",
    "content": "# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack.package import *\n\n\nclass Costa(CMakePackage):\n    \"\"\"\n    Distributed Communication-Optimal Matrix Transpose and Reshuffle Library\n    Based on the paper: https://arxiv.org/abs/2106.06601\n    \"\"\"\n\n    maintainers(\"haampie\", \"kabicm\", \"RMeli\", \"mtaillefumier\")\n    homepage = \"https://github.com/eth-cscs/COSTA\"\n    git = \"https://github.com/eth-cscs/COSTA.git\"\n\n    license(\"BSD-3-Clause\")\n\n    # note: The default archives produced with github do not have the archives\n    #       of the submodules.\n    version(\"master\", branch=\"master\", submodules=True)\n    version(\"2.3.0\", sha256=\"0413311a2821d4cd1f3f026672a75a5b5a2956f61305c07d7fc14565a126b517\")\n    version(\"2.2.2\", sha256=\"e87bc37aad14ac0c5922237be5d5390145c9ac6aef0350ed17d86cb2d994e67c\")\n    version(\"2.2.2\", sha256=\"e87bc37aad14ac0c5922237be5d5390145c9ac6aef0350ed17d86cb2d994e67c\")\n    version(\"2.2.1\", sha256=\"aa8aa2a4a79de094f857c22293825de270ff72becd6bd736ff9f2dd8c192446d\")\n    version(\"2.2\", sha256=\"3e7333f012af76ec3508276ea90800313f6136504667021fe229e710bf6acdc7\")\n    version(\"2.1\", sha256=\"c1e86452415083f7470b292d93ec60708b7c8dbafc2bac383636bb4b28135866\")\n    version(\"2.0\", sha256=\"de250197f31f7d23226c6956a687c3ff46fb0ff6c621a932428236c3f7925fe4\")\n\n    depends_on(\"cxx\", type=\"build\")  # generated\n\n    variant(\"scalapack\", default=False, description=\"Build with ScaLAPACK API\")\n    variant(\"shared\", default=True, description=\"Build shared libraries\")\n    variant(\"profiling\", default=False, description=\"Enable profiling\")\n    variant(\"tests\", default=False, description=\"Enable tests\")\n    variant(\"apps\", default=False, description=\"Enable miniapp\")\n    variant(\"benchmarks\", 
default=False, description=\"Enable benchmarks\")\n\n    depends_on(\"cmake@3.22:\", type=\"build\")\n    depends_on(\"mpi@3:\")\n    depends_on(\"scalapack\", when=\"+scalapack\")\n    depends_on(\"cxxopts\", when=\"+apps\")\n    depends_on(\"cxxopts\", when=\"+tests\")\n    depends_on(\"semiprof\", when=\"+profiling\")\n\n    def url_for_version(self, version):\n        if version == Version(\"2.0\"):\n            return \"https://github.com/eth-cscs/COSTA/releases/download/v{0}/COSTA-v{1}.tar.gz\".format(\n                version, version\n            )\n        return \"https://github.com/eth-cscs/COSTA/archive/refs/tags/v{0}.tar.gz\".format(version)\n\n    def setup_build_environment(self, env):\n        return\n\n    def costa_scalapack_cmake_arg(self):\n        spec = self.spec\n\n        if spec.satisfies(\"~scalapack\"):\n            return \"OFF\"\n        elif spec.satisfies(\"^intel-mkl\") or spec.satisfies(\"^intel-oneapi-mkl\"):\n            return \"MKL\"\n        elif spec.satisfies(\"^cray-libsci\"):\n            return \"CRAY_LIBSCI\"\n\n        return \"CUSTOM\"\n\n    def cmake_args(self):\n        return [\n            self.define_from_variant(\"COSTA_WITH_BENCHMARKS\", \"benchmarks\"),\n            self.define_from_variant(\"COSTA_WITH_APPS\", \"apps\"),\n            self.define_from_variant(\"COSTA_WITH_TESTS\", \"tests\"),\n            self.define_from_variant(\"COSTA_WITH_PROFILING\", \"profiling\"),\n            self.define_from_variant(\"BUILD_SHARED_LIBS\", \"shared\"),\n            self.define(\"COSTA_SCALAPACK\", self.costa_scalapack_cmake_arg()),\n        ]\n"
  },
  {
    "path": "spack_repo/cosma/packages/cosma/fj-ssl2.patch",
    "content": "diff --git a/CMakeLists.txt b/CMakeLists.txt\nindex 1fd1e55..41a041b 100644\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -19,7 +19,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS \"YES\") # always write compile_commands.json\n \n set(COSMA_GPU_BACKENDS_LIST \"CUDA\" \"ROCM\")\n set(COSMA_SCALAPACK_LIST \"OFF\" \"MKL\" \"CRAY_LIBSCI\" \"CUSTOM\")\n-set(COSMA_BLAS_LIST   \"auto\" \"MKL\" \"OPENBLAS\" \"CRAY_LIBSCI\" \"CUSTOM\" \"BLIS\" \"ATLAS\" \"CUDA\" \"ROCM\" \"OFF\")\n+set(COSMA_BLAS_LIST   \"auto\" \"MKL\" \"SSL2\" \"OPENBLAS\" \"CRAY_LIBSCI\" \"CUSTOM\" \"BLIS\" \"ATLAS\" \"CUDA\" \"ROCM\" \"OFF\")\n option(COSMA_WITH_TESTS \"Generate the test target.\" ON)\n option(COSMA_WITH_APPS \"Generate the miniapp targets.\" ON)\n option(COSMA_WITH_BENCHMARKS \"Generate the benchmark targets.\" ON)\n@@ -45,7 +45,7 @@ if (COSMA_BLAS MATCHES \"CUDA|ROCM\")\n   set(COSMA_GPU_BACKEND ${COSMA_BLAS})\n else()\n   if(COSMA_BLAS STREQUAL \"OFF\")\n-    message(FATAL_ERROR \"A Blas implementation is needed when running on CPU only: choices are : auto, MKL, OPENBLAS, CRAY_LIBSCI, CUSTOM, BLIS, ATLAS, FLEXIBLAS, ARMPL, GenericBLAS\")\n+    message(FATAL_ERROR \"A Blas implementation is needed when running on CPU only: choices are : auto, MKL, SSL2, OPENBLAS, CRAY_LIBSCI, CUSTOM, BLIS, ATLAS, FLEXIBLAS, ARMPL, GenericBLAS\")\n   else()\n     set(COSMA_BLAS_VENDOR ${COSMA_BLAS})\n   endif()\n@@ -190,6 +190,7 @@ install(FILES \"${cosma_BINARY_DIR}/cosmaConfig.cmake\"\n   \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n   \"${cosma_BINARY_DIR}/cosmaConfigVersion.cmake\"\n   \"${cosma_SOURCE_DIR}/cmake/FindMKL.cmake\"\n+  \"${cosma_SOURCE_DIR}/cmake/FindSSL2.cmake\"\n   \"${cosma_SOURCE_DIR}/cmake/FindBlas.cmake\"\n   \"${cosma_SOURCE_DIR}/cmake/FindSCALAPACK.cmake\"\n   \"${cosma_SOURCE_DIR}/cmake/FindOPENBLAS.cmake\"\ndiff --git a/cmake/FindBlas.cmake b/cmake/FindBlas.cmake\nindex aef956c..3c47561 100644\n--- a/cmake/FindBlas.cmake\n+++ b/cmake/FindBlas.cmake\n@@ -14,6 
+14,7 @@ endif()\n set(COSMA_BLAS_VENDOR_LIST\n   \"auto\"\n   \"MKL\"\n+  \"SSL2\"\n   \"OPENBLAS\"\n   \"FLEXIBLAS\"\n   \"ARMPL\"\ndiff --git a/cmake/FindSSL2.cmake b/cmake/FindSSL2.cmake\nnew file mode 100644\nindex 0000000..f0e11bf\n--- /dev/null\n+++ b/cmake/FindSSL2.cmake\n@@ -0,0 +1,56 @@\n+#.rst:\n+# FindSSL2\n+# -----------\n+#\n+# This module tries to find the SSL2 library.\n+#\n+# The following variables are set\n+#\n+# ::\n+#\n+#   SSL2_FOUND           - True if ssl2 is found\n+#   SSL2_LIBRARIES       - The required libraries\n+#   SSL2_INCLUDE_DIRS    - The required include directory\n+#\n+# The following import target is created\n+#\n+# ::\n+#\n+#   SSL2::ssl2\n+\n+#set paths to look for library from ROOT variables.If new policy is set, find_library() automatically uses them.\n+# if(NOT POLICY CMP0074)\n+set(_SSL2_PATHS ${SSL2_ROOT}\n+                 $ENV{SSL2_ROOT}\n+                 $ENV{SSL2ROOT}\n+                 $ENV{SSL2_DIR}\n+                 $ENV{SSL2DIR})\n+# endif()\n+\n+find_library(\n+    COSMA_SSL2_LINK_LIBRARIES\n+    NAMES \"fjlapackex\"\n+    HINTS ${_SSL2_PATHS}\n+    PATH_SUFFIXES \"lib64\"\n+)\n+find_path(\n+    COSMA_SSL2_INCLUDE_DIRS\n+    NAMES \"cblas.h\" \n+    HINTS ${_SSL2_PATHS}\n+    PATH_SUFFIXES \"include\"\n+)\n+\n+# check if found\n+include(FindPackageHandleStandardArgs)\n+find_package_handle_standard_args(SSL2 REQUIRED_VARS COSMA_SSL2_INCLUDE_DIRS COSMA_SSL2_LINK_LIBRARIES)\n+\n+# add target to link against\n+if(NOT TARGET cosma::BLAS::SSL2::ssl2)\n+  add_library(cosma::BLAS::SSL2::ssl2 INTERFACE IMPORTED)\n+  add_library(cosma::BLAS::SSL2::blas ALIAS cosma::BLAS::SSL2::ssl2)\n+endif()\n+set_property(TARGET cosma::BLAS::SSL2::ssl2 PROPERTY INTERFACE_LINK_LIBRARIES ${COSMA_SSL2_LINK_LIBRARIES})\n+set_property(TARGET cosma::BLAS::SSL2::ssl2 PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${COSMA_SSL2_INCLUDE_DIRS})\n+\n+# prevent clutter in cache\n+MARK_AS_ADVANCED(SSL2_FOUND SSL2_LIBRARIES SSL2_INCLUDE_DIRS)\n"
  },
  {
    "path": "spack_repo/cosma/packages/cosma/package.py",
    "content": "# Copyright Spack Project Developers. See COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack_repo.builtin.build_systems.cmake import CMakePackage\n\nfrom spack.package import *\n\n\nclass Cosma(CMakePackage):\n    \"\"\"\n    Distributed Communication-Optimal Matrix-Matrix Multiplication Library\n    \"\"\"\n\n    maintainers(\"haampie\", \"kabicm\", \"teonnik\", \"simonpintarelli\", \"mtaillefumier\")\n    homepage = \"https://github.com/eth-cscs/COSMA\"\n    url = \"https://github.com/eth-cscs/COSMA/archive/refs/tags/v2.6.6.tar.gz\"\n    git = \"https://github.com/eth-cscs/COSMA.git\"\n\n    license(\"BSD-3-Clause\")\n\n    # note: The default archives produced with github do not have the archives\n    #       of the submodules.\n    version(\"master\", branch=\"master\", submodules=False)\n    version(\"2.7.0\", sha256=\"f4775d18379539d7bb5053bff8acb4e13d6ed31a9677f498d9099a7500488789\")\n    version(\"2.6.6\", sha256=\"1604be101e77192fbcc5551236bc87888d336e402f5409bbdd9dea900401cc37\")\n    version(\"2.6.5\", sha256=\"10d9b7ecc1ce44ec5b9e0c0bf89278a63029912ec3ea99661be8576b553ececf\")\n    version(\"2.6.4\", sha256=\"6d7bd5e3005874af9542a329c93e7ccd29ca1a5573dae27618fac2704fa2b6ab\")\n    version(\"2.6.3\", sha256=\"c2a3735ea8f860930bea6706d968497d72a1be0498c689b5bc4a951ffc2d1146\")\n    version(\"2.6.2\", sha256=\"2debb5123cc35aeebc5fd2f8a46cfd6356d1e27618c9bb57129ecd09aa400940\")\n    version(\"2.6.1\", sha256=\"69aa6634a030674f0d9be61e7b0bf0dc17acf0fc9e7a90b40e3179e2254c8d67\")\n    version(\"2.5.1\", sha256=\"085b7787597374244bbb1eb89bc69bf58c35f6c85be805e881e1c0b25166c3ce\")\n    version(\"2.5.0\", sha256=\"7f68bb0ee5c80f9b8df858afcbd017ad4ed87ac09439d13d7d890844dbdd3d54\")\n    version(\"2.4.0\", sha256=\"5714315ce06d48037f86cfee2d7f19340643fee95e9d7f1e92dc1b623b67e395\")\n    version(\"2.3.0\", sha256=\"0c01c2deb5a0cd177952178350188a62c42ce55e604d7948ac472f55bf0d4815\")\n    version(\"2.2.0\", 
sha256=\"1eb92a98110df595070a12193b9221eecf9d103ced8836c960f6c79a2bd553ca\")\n    version(\"2.0.7\", sha256=\"8d70bfcbda6239b6a8fbeaca138790bbe58c0c3aa576879480d2632d4936cf7e\")\n    version(\"2.0.2\", sha256=\"4f3354828bc718f3eef2f0098c3bdca3499297497a220da32db1acd57920c68d\")\n\n    # We just need the libraries of cuda and rocm, so no need to extend\n    # CudaPackage or ROCmPackage.\n    variant(\"cuda\", default=False, description=\"Build with cuBLAS support\")\n    variant(\"rocm\", default=False, description=\"Build with rocBLAS support\")\n    variant(\"scalapack\", default=False, description=\"Build with ScaLAPACK API\")\n    variant(\"shared\", default=True, description=\"Build the shared library version\")\n    variant(\"tests\", default=False, description=\"Build tests\")\n    variant(\"apps\", default=False, description=\"Build miniapp\")\n    variant(\"profiling\", default=False, description=\"Enable profiling\")\n    variant(\"gpu_direct\", default=False, description=\"GPU aware MPI\")\n\n    with when(\"+cuda\"):\n        variant(\"nccl\", default=False, description=\"Use cuda nccl\")\n\n    with when(\"+rocm\"):\n        variant(\"rccl\", default=False, description=\"Use rocm rccl\")\n\n    with when(\"@2.8.0:+rocm\"):\n        variant(\"unified_memory\", default=False)\n\n    depends_on(\"cxx\", type=\"build\")\n    depends_on(\"c\", type=\"build\")\n    depends_on(\"fortran\", type=\"build\")\n\n    depends_on(\"cmake@3.22:\", type=\"build\")\n    depends_on(\"mpi@3:\")\n    depends_on(\"blas\", when=\"~cuda ~rocm\")\n    depends_on(\"scalapack\", when=\"+scalapack\")\n    depends_on(\"cuda\", when=\"+cuda\")\n    depends_on(\"rocblas\", when=\"+rocm\")\n    depends_on(\"nccl\", when=\"+nccl\")\n    depends_on(\"rccl\", when=\"+rccl\")\n\n    with when(\"@2.6.3:\"):\n        depends_on(\"tiled-mm@2.2:+cuda\", when=\"+cuda\")\n        depends_on(\"tiled-mm@2.2:+rocm\", when=\"+rocm\")\n\n    with when(\"@2.6.1:2.6.2\"):\n        
depends_on(\"tiled-mm@2.0+rocm\", when=\"+rocm\")\n        depends_on(\"tiled-mm@2.0+cuda\", when=\"+cuda\")\n\n    with when(\"@2.6.1:\"):\n        depends_on(\"costa\")\n        depends_on(\"costa+scalapack\", when=\"+scalapack\")\n        depends_on(\"cxxopts\", when=\"+apps\")\n        depends_on(\"cxxopts\", when=\"+tests\")\n        depends_on(\"semiprof\", when=\"+profiling\")\n        depends_on(\"costa+profiling\", when=\"+profiling\")\n\n    patch(\"fj-ssl2.patch\", when=\"^fujitsu-ssl2\")\n\n    def setup_build_environment(self, env: EnvironmentModifications) -> None:\n        if self.spec.satisfies(\"+cuda\"):\n            env.set(\"CUDA_PATH\", self.spec[\"cuda\"].prefix)\n\n    def cosma_blas_cmake_arg(self):\n        query_to_cmake_arg = [\n            (\"+cuda\", \"CUDA\"),\n            (\"+rocm\", \"ROCM\"),\n            (\"^[virtuals=blas] intel-oneapi-mkl\", \"MKL\"),\n            (\"^[virtuals=blas] cray-libsci\", \"CRAY_LIBSCI\"),\n            (\"^[virtuals=blas] netlib-lapack\", \"CUSTOM\"),\n            (\"^[virtuals=blas] openblas\", \"OPENBLAS\"),\n            (\"^[virtuals=blas] fujitsu-ssl2\", \"SSL2\"),\n        ]\n\n        if self.version >= Version(\"2.4.0\"):\n            query_to_cmake_arg.extend(\n                [\n                    (\"^[virtuals=blas] blis\", \"BLIS\"),\n                    (\"^[virtuals=blas] amdblis\", \"BLIS\"),\n                    (\"^[virtuals=blas] atlas\", \"ATLAS\"),\n                ]\n            )\n\n        for query, cmake_arg in query_to_cmake_arg:\n            if query in self.spec:\n                return cmake_arg\n\n        return \"CUSTOM\"\n\n    def cosma_scalapack_cmake_arg(self):\n        spec = self.spec\n\n        if spec.satisfies(\"~scalapack\"):\n            return \"OFF\"\n        elif spec.satisfies(\"^[virtuals=scalapack] intel-oneapi-mkl\"):\n            return \"MKL\"\n        elif spec.satisfies(\"^cray-libsci\"):\n            return \"CRAY_LIBSCI\"\n\n        return 
\"CUSTOM\"\n\n    def cmake_args(self):\n        return [\n            self.define_from_variant(\"COSMA_WITH_TESTS\", \"tests\"),\n            self.define_from_variant(\"COSMA_WITH_APPS\", \"apps\"),\n            self.define_from_variant(\"COSMA_WITH_NCCL\", \"nccl\"),\n            self.define_from_variant(\"COSMA_WITH_RCCL\", \"rccl\"),\n            self.define_from_variant(\"COSMA_WITH_GPU_AWARE_MPI\", \"gpu_direct\"),\n            self.define_from_variant(\"COSMA_WITH_PROFILING\", \"profiling\"),\n            self.define_from_variant(\"COSMA_USE_UNIFIED_MEMORY\", \"unified_memory\"),\n            self.define(\"COSMA_WITH_BENCHMARKS\", False),\n            self.define(\"COSMA_BLAS\", self.cosma_blas_cmake_arg()),\n            self.define(\"COSMA_SCALAPACK\", self.cosma_scalapack_cmake_arg()),\n            self.define_from_variant(\"BUILD_SHARED_LIBS\", \"shared\"),\n        ]\n"
  },
  {
    "path": "spack_repo/cosma/packages/tiled-mm/package.py",
    "content": "# Copyright Spack Project Developers. See COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack_repo.builtin.build_systems.cmake import CMakePackage\nfrom spack_repo.builtin.build_systems.cuda import CudaPackage\nfrom spack_repo.builtin.build_systems.rocm import ROCmPackage\n\nfrom spack.package import *\n\n\nclass TiledMm(CMakePackage, CudaPackage, ROCmPackage):\n    \"\"\"Matrix multiplication on GPUs for matrices stored on a CPU. Similar to cublasXt,\n    but ported to both NVIDIA and AMD GPUs.\"\"\"\n\n    homepage = \"https://github.com/eth-cscs/Tiled-MM/\"\n    url = \"https://github.com/eth-cscs/Tiled-MM/archive/refs/tags/v2.0.tar.gz\"\n    git = \"https://github.com/eth-cscs/Tiled-MM.git\"\n\n    maintainers(\"mtaillefumier\", \"simonpintarelli\", \"RMeli\")\n\n    license(\"BSD-3-Clause\")\n\n    version(\"master\", branch=\"master\")\n\n    version(\"2.3.2\", sha256=\"1f91ca02f6ee8e400835fa90630618baf86a7b425b4bbbb4151068f72658b858\")\n    version(\"2.3.1\", sha256=\"68914a483e62f796b790ea428210b1d5ef5943d6289e53d1aa62f56a20fbccc8\")\n    version(\"2.3\", sha256=\"504c6201f5a9be9741c55036bf8e2656ae3f4bc19996295b264ee5e303c9253c\")\n    version(\"2.2\", sha256=\"6d0b49c9588ece744166822fd44a7bc5bec3dc666b836de8bf4bf1a7bb675aac\")\n    version(\"2.0\", sha256=\"ea554aea8c53d7c8e40044e6d478c0e8137d7e8b09d7cb9650703430d92cf32e\")\n\n    variant(\"shared\", default=True, description=\"Build shared libraries\")\n    variant(\"examples\", default=False, description=\"Enable examples\")\n    variant(\"tests\", default=False, description=\"Enable tests\")\n\n    depends_on(\"cxx\", type=\"build\")  # generated\n\n    depends_on(\"rocblas\", when=\"+rocm\")\n    depends_on(\"cxxopts\", when=\"+tests\")\n    depends_on(\"cxxopts\", when=\"+examples\")\n\n    conflicts(\"~cuda~rocm\")\n    conflicts(\"+cuda\", when=\"+rocm\")\n\n    def cmake_args(self):\n        args = [\n            
self.define_from_variant(\"BUILD_SHARED_LIBS\", \"shared\"),\n            self.define_from_variant(\"TILEDMM_WITH_EXAMPLES\", \"examples\"),\n            self.define_from_variant(\"TILEDMM_WITH_TESTS\", \"tests\"),\n        ]\n\n        if \"+rocm\" in self.spec:\n            args.extend([self.define(\"TILEDMM_GPU_BACKEND\", \"ROCM\")])\n\n        if \"+cuda\" in self.spec:\n            args.extend([self.define(\"TILEDMM_GPU_BACKEND\", \"CUDA\")])\n\n        return args\n"
  },
  {
    "path": "spack_repo/cosma/repo.yaml",
    "content": "repo:\n  namespace: cosma\n"
  },
  {
    "path": "src/cosma/CMakeLists.txt",
    "content": "set(INSTALLED_TARGETS_LIST \"\")\nset(cosma_src_files\n  buffer.cpp\n  communicator.cpp\n  context.cpp\n  interval.cpp\n  layout.cpp\n  local_multiply.cpp\n  mapper.cpp\n  math_utils.cpp\n  matrix.cpp\n  memory_pool.cpp\n  multiply.cpp\n  one_sided_communicator.cpp\n  strategy.cpp\n  two_sided_communicator.cpp\n  random_generator.hpp\n  cinterface.cpp\n  environment_variables.cpp)\n\nif (COSMA_GPU_BACKEND MATCHES \"OFF\")\n\tLIST(APPEND cosma_src_files blas.cpp)\nendif ()\n\nif (COSMA_GPU_BACKEND MATCHES \"ROCM\" OR COSMA_GPU_BACKEND MATCHES \"CUDA\")\n  list(APPEND cosma_src_files \"pinned_buffers.cpp\")\n  if (COSMA_WITH_NCCL OR COSMA_WITH_RCCL)\n    list(APPEND cosma_src_files \"gpu/nccl_utils.cpp\")\n  endif()\n  if (COSMA_WITH_GPU_AWARE_MPI)\n    list(APPEND cosma_src_files \"gpu/gpu_aware_mpi_utils.cpp\")\n  endif()\nendif()\n\nadd_library(cosma ${cosma_src_files})\n\ntarget_include_directories(cosma PUBLIC\n  $<BUILD_INTERFACE:${cosma_SOURCE_DIR}/src>\n)\n\ntarget_compile_features(cosma PUBLIC cxx_std_14)\ntarget_link_libraries(cosma PUBLIC\n  MPI::MPI_CXX\n  costa::costa\n  $<TARGET_NAME_IF_EXISTS:roc::rccl>\n  $<TARGET_NAME_IF_EXISTS:cosma::nccl>\n  $<$<STREQUAL:${COSMA_GPU_BACKEND},OFF>:cosma::BLAS::blas>\n  $<TARGET_NAME_IF_EXISTS:Tiled-MM::Tiled-MM>\n  $<$<STREQUAL:${COSMA_GPU_BACKEND},CUDA>:Tiled-MM::Tiled-MM>\n  $<$<STREQUAL:${COSMA_GPU_BACKEND},ROCM>:Tiled-MM::Tiled-MM>\n  $<$<BOOL:${COSMA_WITH_PROFILING}>:semiprof::semiprof>\n  $<$<BOOL:${COSMA_SCALAPACK}>:cosma::scalapack::scalapack>)\n\ntarget_compile_definitions(cosma PUBLIC\n                              $<$<BOOL:${COSMA_WITH_NCCL}>:COSMA_WITH_NCCL>\n                              $<$<STREQUAL:${COSMA_GPU_BACKEND},ROCM>:__HIP_PLATFORM_HCC__>\n                              $<$<BOOL:${COSMA_WITH_GPU_AWARE_MPI}>:COSMA_WITH_GPU_AWARE_MPI>\n                              $<$<BOOL:${COSMA_WITH_NCCL}>:COSMA_WITH_NCCL>\n                              
$<$<BOOL:${COSMA_WITH_RCCL}>:COSMA_WITH_NCCL>\n                              $<$<STREQUAL:${COSMA_BLAS_VENDOR},MKL>:COSMA_WITH_MKL_BLAS>\n                              $<$<STREQUAL:${COSMA_BLAS_VENDOR},BLIS>:COSMA_WITH_BLIS_BLAS>\n                              $<$<NOT:$<IN_LIST:${COSMA_BLAS_VENDOR},MKL;BLIS;OFF>>:COSMA_WITH_BLAS>\n                              $<$<STREQUAL:${COSMA_GPU_BACKEND},CUDA>:COSMA_HAVE_GPU>\n                              $<$<STREQUAL:${COSMA_GPU_BACKEND},ROCM>:COSMA_HAVE_GPU>\n                              PRIVATE\n                              $<$<BOOL:${COSMA_WITH_PROFILING}>:COSMA_WITH_PROFILING>)\n\n\nlist(APPEND INSTALLED_TARGETS_LIST \"cosma\")\n\n# if SCALAPACK is found and cosma_pxgemm library is not already created\n# then create it here and link it to the profiler if needed\n# build as a shared library is necessary here because of the function interposing\nif(COSMA_SCALAPACK)\n  if (NOT TARGET cosma_pxgemm AND BUILD_SHARED_LIBS)\n    add_library(cosma_pxgemm scalapack.cpp\n      pxgemm_params.hpp\n      cosma_pxgemm.cpp\n      pxgemm.cpp\n    )\n\n    target_link_libraries(cosma_pxgemm PUBLIC cosma\n    $<$<BOOL:${COSMA_WITH_PROFILING}>:semiprof::semiprof>)\n\n    target_compile_definitions(cosma_pxgemm PRIVATE $<$<BOOL:${COSMA_WITH_PROFILING}>:COSMA_WITH_PROFILING>)\n    list(APPEND INSTALLED_TARGETS_LIST \"cosma_pxgemm\")\n  endif()\n\n  # this is a library exposing the prefixed scalapack API (with cosma/COSMA prefix)\n  # it is aimed for users who don't want to overwrite the available scalapack API with cosma.\n  # if SCALAPACK is found and cosma_prefixed_pxgemm library is not already created\n  # then create it here and link it to the profiler if needed\n    add_library(cosma_prefixed_pxgemm scalapack.cpp\n      pxgemm_params.hpp\n      prefixed_pxgemm.cpp\n      cosma_pxgemm.cpp\n    )\n    target_link_libraries(cosma_prefixed_pxgemm\n    PUBLIC cosma\n    PRIVATE $<$<BOOL:${COSMA_WITH_PROFILING}>:semiprof::semiprof>)\n    
target_compile_definitions(cosma_prefixed_pxgemm PRIVATE $<$<BOOL:${COSMA_WITH_PROFILING}>:COSMA_WITH_PROFILING>)\n    list(APPEND INSTALLED_TARGETS_LIST \"cosma_prefixed_pxgemm\")\n\n  # the following library is aimed only for testing purposes\n  # it provides templated cosma::pxgemm call without\n  # pxgemm.h, so that pxgemm calls of scalapack are not overwritten\n  # and can still be compared to scalapack for correctness check\n  if(NOT TARGET cosma_pxgemm_cpp)\n    add_library(cosma_pxgemm_cpp scalapack.cpp\n      pxgemm_params.hpp\n      cosma_pxgemm.cpp\n    )\n    target_link_libraries(cosma_pxgemm_cpp\n                            PUBLIC cosma\n                            PRIVATE $<$<BOOL:${COSMA_WITH_PROFILING}>:semiprof::semiprof>)\n\n    target_compile_definitions(cosma_pxgemm_cpp\n                                  PRIVATE $<$<BOOL:${COSMA_WITH_PROFILING}>:COSMA_WITH_PROFILING>)\n    list(APPEND INSTALLED_TARGETS_LIST \"cosma_pxgemm_cpp\")\n  endif()\nendif()\n\n  install(TARGETS ${INSTALLED_TARGETS_LIST}\n    EXPORT cosma_targets\n    LIBRARY DESTINATION \"${CMAKE_INSTALL_LIBDIR}\"\n    ARCHIVE DESTINATION \"${CMAKE_INSTALL_LIBDIR}\"\n    INCLUDES DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}\")\n\n  install(EXPORT cosma_targets\n    FILE cosmaTargets.cmake\n    NAMESPACE cosma::\n    DESTINATION \"${CMAKE_INSTALL_LIBDIR}/cmake/cosma\")\n"
  },
  {
    "path": "src/cosma/aligned_allocator.hpp",
    "content": "#pragma once\n\n#include <mpi.h>\n\n#include <cassert>\n#include <cosma/environment_variables.hpp>\n#include <cosma/math_utils.hpp>\n#include <exception>\n#include <iostream>\n#include <limits>\n\n/*\n * A custom allocator that:\n *   - allocates the memory encouriging the use of huge pages\n *   - deallocates the memory\n */\n\nnamespace cosma {\ntemplate <typename T>\nclass aligned_allocator {\n  public:\n    using value_type = T;\n    using pointer = value_type *;\n    using const_pointer = const value_type *;\n    using reference = value_type &;\n    using const_reference = const value_type &;\n    using size_type = std::size_t;\n    using difference_type = std::ptrdiff_t;\n\n    // the alignement can be specified by the environment variable\n    // or take its default value otherwise.\n    // The default sizes, as well as the environment variable names\n    // are defined in <cosma/environment_variables.hpp>\n    static int get_alignment() {\n        static int alignment = cosma::get_cosma_cpu_memory_alignment();\n        return alignment;\n    }\n\n    // the minimum alignment for given type T\n    std::size_t min_alignment() {\n        return std::max(math_utils::next_power_of_2(sizeof(T)), sizeof(void *));\n    }\n\n    // Calculate how many additional elements we have to allocate for an array\n    // of length n and data type T.\n    static std::size_t get_alignment_padding(std::size_t n) {\n        auto alignment = get_alignment();\n        assert(alignment > 0);\n        // Calculate the remainder in bytes (since the alignment is in bytes)\n        auto remainder = (n * sizeof(T)) % alignment;\n\n        // Convert the padding from bytes to the number of elements\n        remainder = remainder != 0 ? 
(alignment - remainder) / sizeof(T) : 0;\n\n        // std::cout << \"For size \" << n << \", reminder = \" << remainder <<\n        // std::endl; std::cout << \"sizeof(T) = \" << sizeof(T) << std::endl;\n        return remainder;\n    }\n\n    // allocate memory with alignment specified as a template parameter\n    // returns nullptr on failure\n    T *aligned_malloc(std::size_t size) {\n        auto alignment = get_alignment();\n        // if alignment is disabled, use the standard malloc\n        if (alignment <= 0) {\n            return reinterpret_cast<T *>(malloc(size * sizeof(T)));\n        }\n        // check if the requested size is a multiple of the alignment\n        assert(get_alignment_padding(size) == 0);\n        // check if the alignment is >= min_alignment for this data type T\n        assert(alignment >= min_alignment());\n        // check if the alignment is a power of 2 and a multiple of\n        // sizeof(void*).\n        assert(math_utils::is_power_of_2(alignment));\n        // \"Memory alignment must be a power of 2.\");\n        // This is required for the posix_memalign function.\n        assert(alignment % sizeof(void *) == 0);\n        // \"Memory alignment must be a multiple of sizeof(void*)\");\n        void *ptr;\n        if (posix_memalign(&ptr, alignment, size * sizeof(T)) == 0) {\n            return reinterpret_cast<T *>(ptr);\n        }\n        return nullptr;\n    }\n\n    aligned_allocator() {}\n    ~aligned_allocator() {}\n\n    aligned_allocator(aligned_allocator const &) {}\n\n    pointer address(reference r) { return &r; }\n\n    const_pointer address(const_reference r) { return &r; }\n\n    pointer allocate(size_type cnt,\n                     const void* = nullptr) {\n        if (cnt > 0) {\n            pointer ptr;\n            if (!cosma::get_unified_memory()) {\n                ptr = aligned_malloc(cnt);\n\t    }\n#if defined(COSMA_USE_UNIFIED_MEMORY)\n            else {\n                hipMalloc(&ptr, cnt * 
sizeof(T));\n\t    }\n#endif\n            return ptr;\n        }\n        return nullptr;\n    }\n\n    void deallocate(pointer p, size_type cnt) {\n        if (p) {\n            if (!cosma::get_unified_memory())\n                std::free(p);\n#if defined(COSMA_USE_UNIFIED_MEMORY)\n            else\n                hipFree(p);\n#endif\n        }\n    }\n\n    size_type max_size() const {\n        return std::numeric_limits<size_type>::max() / sizeof(T);\n    }\n\n    void construct(pointer p, const T &t) { new (p) T(t); }\n\n    void destroy(pointer p) {\n        if (p) {\n            p->~T();\n        }\n    }\n\n    bool operator==(aligned_allocator const &) { return true; }\n\n    bool operator!=(aligned_allocator const &a) { return !operator==(a); }\n};\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/blacs.hpp",
    "content": "#pragma once\n#include <mpi.h>\n\nnamespace cosma {\nnamespace blacs {\nextern \"C\" {\n    // Initialization\n    void Cblacs_pinfo(int* mypnum, int* nprocs);\n    void Cblacs_setup(int* mypnum, int* nprocs);\n    void Cblacs_set(int ictxt, int what, int* val);\n    void Cblacs_get(int ictxt, int what, int* val);\n    void Cblacs_gridinit(int* ictxt, char* order, int nprow, int npcol);\n    void Cblacs_gridmap(int* ictxt, int* usermap, int ldup, int nprow, int npcol);\n\n    // Finalization\n    void Cblacs_freebuff(int ictxt, int wait);\n    void Cblacs_gridexit(int ictxt);\n    void Cblacs_exit(int NotDone);\n\n    // Abort\n    void Cblacs_abort(int ictxt, int errno);\n\n    // Information\n    void Cblacs_gridinfo(int ictxt, int* nprow, int* npcol, int* myrow, int* mycol);\n    int Cblacs_pnum(int ictxt, int prow, int pcol);\n    void Cblacs_pcoord(int ictxt, int nodenum, int* prow, int* pcol);\n\n    // Barrier\n    void Cblacs_barrier(int ictxt, char* scope);\n\n    // MPI communicator <-> Blacs context\n    MPI_Comm Cblacs2sys_handle(int ictxt);\n    int Csys2blacs_handle(MPI_Comm mpi_comm);\n    void Cfree_blacs_system_handle(int i_sys_ctxt);\n}\n}}\n"
  },
  {
    "path": "src/cosma/blas.cpp",
    "content": "#include <cosma/blas.hpp>\n\n// extern \"C\" {\n#ifdef COSMA_WITH_MKL_BLAS\n#include <mkl.h>\n#endif\n\n#ifdef COSMA_WITH_BLIS_BLAS\n#include <blis.h>\n#endif\n\n#ifdef COSMA_WITH_BLAS\n#include <cblas.h>\n// this is for backward compatibility,\n// in case CBLAS_LAYOUT is not defined\ntypedef CBLAS_ORDER CBLAS_LAYOUT;\n#endif\n// }\n\n// The file is not needed if GPU is used\n//\n#if defined(COSMA_WITH_MKL_BLAS) || defined(COSMA_WITH_BLIS_BLAS) || defined(COSMA_WITH_BLAS)\nnamespace cosma {\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const double alpha,\n          const double *A,\n          const int lda,\n          const double *B,\n          const int ldb,\n          const double beta,\n          double *C,\n          const int ldc) {\n    cblas_dgemm(CBLAS_LAYOUT::CblasColMajor,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                M,\n                N,\n                K,\n                alpha,\n                A,\n                lda,\n                B,\n                ldb,\n                beta,\n                C,\n                ldc);\n}\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const std::complex<double> alpha,\n          const std::complex<double> *A,\n          const int lda,\n          const std::complex<double> *B,\n          const int ldb,\n          const std::complex<double> beta,\n          std::complex<double> *C,\n          const int ldc) {\n    cblas_zgemm(CBLAS_LAYOUT::CblasColMajor,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                M,\n                N,\n                K,\n                reinterpret_cast<const double*>(&alpha),\n                reinterpret_cast<const double*>(A),\n                lda,\n                reinterpret_cast<const double*>(B),\n                ldb,\n                reinterpret_cast<const 
double*>(&beta),\n                reinterpret_cast<double*>(C),\n                ldc);\n}\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const float alpha,\n          const float *A,\n          const int lda,\n          const float *B,\n          const int ldb,\n          const float beta,\n          float *C,\n          const int ldc) {\n    cblas_sgemm(CBLAS_LAYOUT::CblasColMajor,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                M,\n                N,\n                K,\n                alpha,\n                A,\n                lda,\n                B,\n                ldb,\n                beta,\n                C,\n                ldc);\n}\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const std::complex<float> alpha,\n          const std::complex<float> *A,\n          const int lda,\n          const std::complex<float> *B,\n          const int ldb,\n          const std::complex<float> beta,\n          std::complex<float> *C,\n          const int ldc) {\n    cblas_cgemm(CBLAS_LAYOUT::CblasColMajor,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                CBLAS_TRANSPOSE::CblasNoTrans,\n                M,\n                N,\n                K,\n                reinterpret_cast<const float*>(&alpha),\n                reinterpret_cast<const float*>(A),\n                lda,\n                reinterpret_cast<const float*>(B),\n                ldb,\n                reinterpret_cast<const float*>(&beta),\n                reinterpret_cast<float*>(C),\n                ldc);\n}\n\n} // namespace cosma\n#endif\n"
  },
  {
    "path": "src/cosma/blas.hpp",
    "content": "#pragma once\n#include <complex>\n\nnamespace cosma {\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const double alpha,\n          const double *A,\n          const int lda,\n          const double *B,\n          const int ldb,\n          const double beta,\n          double *C,\n          const int ldc);\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const std::complex<double> alpha,\n          const std::complex<double> *A,\n          const int lda,\n          const std::complex<double> *B,\n          const int ldb,\n          const std::complex<double> beta,\n          std::complex<double> *C,\n          const int ldc);\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const float alpha,\n          const float *A,\n          const int lda,\n          const float *B,\n          const int ldb,\n          const float beta,\n          float *C,\n          const int ldc);\n\nvoid gemm(const int M,\n          const int N,\n          const int K,\n          const std::complex<float> alpha,\n          const std::complex<float> *A,\n          const int lda,\n          const std::complex<float> *B,\n          const int ldb,\n          const std::complex<float> beta,\n          std::complex<float> *C,\n          const int ldc);\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/buffer.cpp",
    "content": "#include <cosma/buffer.hpp>\n#include <cosma/context.hpp>\n#include <cosma/profiler.hpp>\n#include <complex>\n\n#include <algorithm>\n\nnamespace cosma {\n\ntemplate<typename T>\nBuffer<T>::Buffer(): ctxt_(nullptr) {}\n\ntemplate <typename T>\nBuffer<T>::Buffer(cosma_context<T>* ctxt,\n                  Mapper *mapper,\n                  Layout *layout,\n                  bool dry_run)\n    : ctxt_(ctxt)\n    , strategy_(&(mapper->strategy()))\n    , label_(mapper->label())\n    , rank_(mapper->rank())\n    , mapper_(mapper)\n    , layout_(layout) {\n\n    PE(preprocessing_matrices_buffer);\n    compute_n_buckets();\n\n    max_base_buffer_size_ = 0;\n    max_reduce_buffer_size_ = 0;\n    max_reshuffle_buffer_size_ = 0;\n    current_buffer_ = 0;\n    max_send_buffer_size_ = (size_t)mapper_->initial_size();\n    max_recv_buffer_size_ = (size_t)mapper_->initial_size();\n\n    init_first_split_steps();\n    buff_sizes_ = compute_buffer_size();\n\n    // to account for possible swapping with the reduce buffer\n    // that occurs if k split is present and beta != 0\n    if (label_ == 'C') {\n        for (int step = 0; step < strategy_->n_steps(); ++step) {\n            if (strategy_->split_k(step) && strategy_->parallel_step(step)) {\n                max_reduce_buffer_size_ = std::max(\n                          max_reduce_buffer_size_,\n                          *max_element(buff_sizes_.begin(), buff_sizes_.end()));\n                break;\n            }\n        }\n    }\n\n    allocate_initial_buffers(dry_run);\n    PL();\n}\n\ntemplate <typename T>\nBuffer<T>::Buffer(Mapper *mapper,\n                  Layout *layout,\n                  bool dry_run)\n    : Buffer(get_context_instance<T>(), mapper, layout, dry_run) {}\n\ntemplate <typename T>\nvoid Buffer<T>::allocate_communication_buffers(bool dry_run) {\n    if (!dry_run && rank_ < strategy_->P && buff_sizes_.size() > 1) {\n        // check if the initial buffer is already initialized\n        
assert(buffers_.size() == 1);\n        // initial buffer is already allocated, so start from 1\n        for (int i = 1; i < buff_sizes_.size(); ++i) {\n            auto id = ctxt_->get_memory_pool().get_buffer_id(buff_sizes_[i]);\n            buffers_.push_back(id);\n        }\n\n        if (max_reshuffle_buffer_size_ > 0) {\n            reshuffle_buffer_ = ctxt_->get_memory_pool().get_buffer_id(max_reshuffle_buffer_size_);\n        }\n\n        if (max_reduce_buffer_size_ > 0) {\n            reduce_buffer_ = ctxt_->get_memory_pool().get_buffer_id(max_reduce_buffer_size_);\n        }\n#ifdef DEBUG\n        for (int rank = 0; rank < strategy_->P; ++rank) {\n            if (rank_ == rank) {\n                std::cout << \"Rank \" << rank_ << \" buffers\" << std::endl;\n                std::cout << \"Buffer sizes for matrix \" << label_ << \" on rank \" << rank_\n                          << std::endl;\n                std::cout << \"max_reshuffle_buffer_size_ = \" << max_reshuffle_buffer_size_\n                          << std::endl;\n                std::cout << \"max_reduce_buffer_size_ = \" << max_reduce_buffer_size_\n                          << std::endl;\n                std::cout << \"max_send_buffer_size_ = \" << max_send_buffer_size_\n                          << std::endl;\n                std::cout << \"max_recv_buffer_size_ = \" << max_recv_buffer_size_\n                          << std::endl;\n                std::cout << \"max_base_buffer_size_ = \" << max_base_buffer_size_\n                          << std::endl;\n                for (int i = 0; i < buff_sizes_.size(); ++i) {\n                    std::cout << \"buffer\" << i << \" size = \" << buff_sizes_[i] << std::endl;\n                }\n            }\n            // MPI_Barrier(MPI_COMM_WORLD);\n        }\n#endif\n    }\n}\n\ntemplate <typename T>\nstd::vector<std::size_t> Buffer<T>::get_all_buffer_sizes() {\n    std::vector<std::size_t> buffer_sizes;\n    if (rank_ < strategy_->P) {\n        if 
(buff_sizes_.size() >= 1) {\n            buffer_sizes.push_back(std::max(\n                                       (size_t) buff_sizes_[0],\n                                       mapper_->initial_size()\n                                   )\n                                  );\n        }\n        for (int i = 1; i < buff_sizes_.size(); ++i) {\n            buffer_sizes.push_back(buff_sizes_[i]);\n        }\n        if (max_reduce_buffer_size_ > 0) {\n            buffer_sizes.push_back(max_reduce_buffer_size_);\n        }\n        if (max_reshuffle_buffer_size_ > 0) {\n            buffer_sizes.push_back(max_reshuffle_buffer_size_);\n        }\n    }\n\n    return buffer_sizes;\n}\n\ntemplate <typename T>\nvoid Buffer<T>::allocate_initial_buffers(bool dry_run) {\n    if (!dry_run && rank_ < strategy_->P && buff_sizes_.size() > 0) {\n        buffers_.reserve(buff_sizes_.size());\n\n        // allocate initial buffer (to store the matrix)\n        buff_sizes_[0] = std::max((size_t) buff_sizes_[0], mapper_->initial_size());\n        auto id = ctxt_->get_memory_pool().get_buffer_id(buff_sizes_[0]);\n        assert(buffers_.size() == 0);\n        buffers_.push_back(id);\n    }\n}\n\ntemplate <typename T>\nvoid Buffer<T>::free_initial_buffers(bool dry_run) {\n    if (!dry_run && rank_ < strategy_->P && buff_sizes_.size() > 0) {\n        // check if all the other buffers were deallocated previously\n        // buff_sizes_ is equal to n_buffers throughout the lifetime of the class\n        // but buffers_ size is decreased whenever some buffer is freed\n        assert(buffers_.size() == 1);\n\n        // deallocate initial buffer (that are storing the matrix)\n        auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(buffers_[0]);\n        ctxt_->get_memory_pool().free_buffer(ptr, buff_sizes_[0]);\n        // remove the pointers pointing to them\n        buffers_.pop_back();\n        buff_sizes_.pop_back();\n    }\n}\n\ntemplate <typename T>\nvoid 
Buffer<T>::free_communication_buffers(bool dry_run) {\n    if (dry_run || rank_ >= strategy_->P || buff_sizes_.size() <= 1) return;\n    // deallocate reshuffle and reduce buffers separately\n    if (max_reduce_buffer_size_ > 0) {\n        auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(reduce_buffer_);\n        ctxt_->get_memory_pool().free_buffer(ptr, max_reduce_buffer_size_);\n    }\n\n    if (max_reshuffle_buffer_size_ > 0) {\n        auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(reshuffle_buffer_);\n        ctxt_->get_memory_pool().free_buffer(ptr, max_reshuffle_buffer_size_);\n    }\n\n    // if there are no communication buffers left, skip\n    if (buff_sizes_.size() == 1)\n        return;\n\n    int n_buffers = buff_sizes_.size();\n    // i = 0 is the initial buffer storing the matrix, so we skip this one.\n    for (int i = n_buffers-1; i >= 1; --i) {\n        auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(buffers_.back());\n        ctxt_->get_memory_pool().free_buffer(ptr, buff_sizes_[i]);\n        // remove the pointers pointing to them\n        buffers_.pop_back();\n    }\n}\n\ntemplate <typename T>\nBuffer<T>::~Buffer() {\n    // check if communication buffers are already deallocated\n    // buffers_.size() can also be 0 if the buffer was default constructed\n    if (buffers_.size() > 0) {\n        free_initial_buffers();\n    }\n}\n\ntemplate <typename T>\nvoid Buffer<T>::compute_n_buckets() {\n    if (strategy_->empty()) \n        return ;\n    n_buckets_ = std::vector<int>(strategy_->n_steps());\n    expanded_after_ = std::vector<bool>(strategy_->n_steps());\n    int prod_n_seq = 1;\n\n    bool expanded = false;\n\n    for (int step = strategy_->n_steps() - 1; step >= 0; --step) {\n        // if the current step is sequential and this matrix was split\n        // then update the product of all seq steps in\n        // which this matrix was split, which represents\n        // the number of buckets\n        if 
(strategy_->sequential_step(step)) {\n            if (strategy_->split(label_, step)) {\n                prod_n_seq *= strategy_->divisor(step);\n            }\n        } else {\n            // if the current matrix was expanded (i.e. NOT split)\n            if (!strategy_->split(label_, step)) {\n                expanded = true;\n            }\n        }\n        n_buckets_[step] = prod_n_seq;\n        expanded_after_[step] = expanded;\n    }\n}\n\ntemplate <typename T>\nvoid Buffer<T>::init_first_split_steps() {\n    int step = 0;\n    first_seq_split_step = -1;\n    last_first_seq_split_step = -1;\n    first_par_extend_step = -1;\n\n    while (step < strategy_->n_steps()) {\n        if (strategy_->sequential_step(step) &&\n            strategy_->split(label_, step)) {\n            // split in seq\n            if (first_par_extend_step < 0 && first_seq_split_step < 0) {\n                // first_seq_step not yet found\n                first_seq_split_step = step;\n                last_first_seq_split_step = step;\n            } else if (first_par_extend_step < 0) {\n                // par step still did not occur\n                last_first_seq_split_step = step;\n            } else {\n                break;\n            }\n        } else if (strategy_->parallel_step(step) &&\n                   !strategy_->split(label_, step)) {\n            // expanded\n            if (first_par_extend_step < 0) {\n                // first par step still was not found\n                first_par_extend_step = step;\n            } else {\n                break;\n            }\n        }\n        step++;\n    }\n}\n\ntemplate <typename T>\nint Buffer<T>::buff_index_before_gemm() const {\n    if (buffers_.size() == 0)\n        return -1;\n    if (buffers_.size() == 1)\n        return 0;\n    // std::cout << \"par steps before gemm for \" << label_ << \" = \" <<\n    // strategy_->par_steps_before_gemm(label_) << std::endl;\n    return strategy_->parallel_steps_before_gemm(label_) % 
2 != 0\n               ? buffers_.size() - 1\n               : buffers_.size() - 2;\n}\n\ntemplate <typename T>\nT* Buffer<T>::buffer_ptr() {\n    auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(buffers_[current_buffer_]);\n    return ptr;\n}\n\ntemplate <typename T>\nconst T* Buffer<T>::buffer_ptr() const {\n    auto ptr = ctxt_->get_memory_pool().get_buffer_pointer(buffers_[current_buffer_]);\n    return ptr;\n}\n\ntemplate <typename T>\nconst size_t Buffer<T>::buffer_size() const {\n    return buff_sizes_[current_buffer_];\n}\n\ntemplate <typename T>\nint Buffer<T>::buffer_index() {\n    return current_buffer_;\n}\n\ntemplate <typename T>\nvoid Buffer<T>::set_buffer_index(int idx) {\n    current_buffer_ = idx;\n}\n\ntemplate <typename T>\nvoid Buffer<T>::swap_reduce_buffer_with(size_t buffer_idx) {\n    std::swap(buffers_[buffer_idx], reduce_buffer_);\n    std::swap(buff_sizes_[buffer_idx], max_reduce_buffer_size_);\n}\n\ntemplate <typename T>\ntypename Buffer<T>::scalar_t *Buffer<T>::reshuffle_buffer_ptr() {\n    if (max_reshuffle_buffer_size_ > 0)\n        return ctxt_->get_memory_pool().get_buffer_pointer(reshuffle_buffer_);\n    return nullptr;\n}\n\ntemplate <typename T>\ntypename Buffer<T>::scalar_t *Buffer<T>::reduce_buffer_ptr() {\n    if (max_reduce_buffer_size_ > 0) {\n        return ctxt_->get_memory_pool().get_buffer_pointer(reduce_buffer_);\n    }\n    return nullptr;\n}\n\ntemplate <typename T>\nT* Buffer<T>::initial_buffer_ptr() {\n    if (buffers_.size() == 0) {\n        return nullptr;\n    }\n    return ctxt_->get_memory_pool().get_buffer_pointer(buffers_[0]);\n}\n\ntemplate <typename T>\nconst T* Buffer<T>::initial_buffer_ptr() const {\n    if (buffers_.size() == 0) {\n        return nullptr;\n    }\n    return ctxt_->get_memory_pool().get_buffer_pointer(buffers_[0]);\n}\n\ntemplate <typename T>\nconst size_t Buffer<T>::initial_buffer_size() const {\n    if (buff_sizes_.size() == 0) {\n        return 0;\n    }\n    return 
buff_sizes_[0];\n}\n\n// increases the index of the current buffer\ntemplate <typename T>\nvoid Buffer<T>::advance_buffer() {\n    // if we are at the last buffer, we then \"swap\" it with the pre-last buffer.\n    // we do this by letting the current index point to the pre-last buffer.\n    if (current_buffer_ == buffers_.size() - 1)\n        current_buffer_--;\n    else\n        current_buffer_++;\n\n    // should never happen\n    if (current_buffer_ < 0)\n        current_buffer_ = 0;\n}\n\ntemplate <typename T>\nstd::vector<size_t> Buffer<T>::compute_buffer_size() {\n    if (strategy_->empty()) {\n        return {(size_t)mapper_->initial_size()};\n    }\n\n    Interval m(0, strategy_->m - 1);\n    Interval n(0, strategy_->n - 1);\n    Interval k(0, strategy_->k - 1);\n    Interval P(0, strategy_->P - 1);\n\n    // assume most memory-consuming case when beta=T{1}\n    return compute_buffer_size(m, n, k, P, 0, rank_, T{1});\n}\n\ntemplate <typename T>\nstd::vector<size_t> Buffer<T>::compute_buffer_size(Interval &m,\n                                                      Interval &n,\n                                                      Interval &k,\n                                                      Interval &P,\n                                                      int step,\n                                                      int rank,\n                                                      scalar_t beta) {\n    std::vector<size_t> sizes;\n    // current submatrices that are being computed\n    Interval2D a_range(m, k);\n    Interval2D b_range(k, n);\n    Interval2D c_range(m, n);\n    Interval2D range;\n\n    // For each of P processors remember which sequential bucket we are\n    // currently on\n    std::vector<int> buckets = layout_->seq_buckets(P);\n    // Skip all buckets that are \"before\" the current submatrices.\n    // the relation submatrix1 <before> submatrix2 is defined in Interval2D.\n    // Intuitively, this will skip all the buckets that 
are \"above\" or \"on the\n    // left\" of the current submatrices. We say \"before\" because whenever we\n    // split in sequential sequentially, we always first start with the \"above\"\n    // submatrix (if the splitting is horizontal) or with the left one (if the\n    // splitting is vertical). which explains the name of the relation \"before\".\n    if (label_ == 'A') {\n        range = a_range;\n    } else if (label_ == 'B') {\n        range = b_range;\n    } else {\n        range = c_range;\n    }\n    layout_->update_buckets(P, range);\n\n    // check the base case\n    if (n_buckets_[step] == 1) {\n        compute_max_buffer_size(m, n, k, P, step, rank, beta);\n        layout_->set_seq_buckets(P, buckets);\n        if (expanded_after_[step]) {\n            return {max_recv_buffer_size_, max_recv_buffer_size_};\n        } else {\n            // return {max_recv_buffer_size_, max_recv_buffer_size_};\n            return {max_recv_buffer_size_};\n        }\n        // if (expanded_after_[step])\n        //     return {max_recv_buffer_size_, max_recv_buffer_size_};\n        // else\n        //     return {};\n    }\n    // invoke a parallel or a sequential step:\n    if (strategy_->sequential_step(step)) {\n        int div = strategy_->divisor(step);\n        int divm = strategy_->divisor_m(step);\n        int divn = strategy_->divisor_n(step);\n        int divk = strategy_->divisor_k(step);\n\n        for (int i = 0; i < div; ++i) {\n            Interval newm = m.subinterval(divm, divm > 1 ? i : 0);\n            Interval newn = n.subinterval(divn, divn > 1 ? i : 0);\n            Interval newk = k.subinterval(divk, divk > 1 ? i : 0);\n\n            // update beta value\n            scalar_t new_beta = beta;\n            if (label_ == 'C' && divk > 1) {\n                new_beta = 1;\n                // new_beta = i == 0 && beta == 0 ? 
0 : 1;\n            }\n\n            // compute substeps\n            std::vector<size_t> subsizes = compute_buffer_size(\n                newm, newn, newk, P, step + 1, rank, new_beta);\n\n            // initialize the sizes vector in the first branch of sequential\n            if (i == 0) {\n                sizes = std::vector<size_t>(subsizes.size());\n            }\n\n            // finds the maximum buffer size for each step among all sequential\n            // branches\n            for (int j = 0; j < sizes.size(); ++j) {\n                sizes[j] = std::max(sizes[j], subsizes[j]);\n            }\n\n            // if dividing over absent dimension, then all the branches are the\n            // same so skip the rest\n            if (!strategy_->split(label_, step)) {\n                break;\n            }\n        }\n        if (strategy_->split(label_, step)) {\n            int max_size = 0;\n            std::vector<int> block_sizes =\n                layout_->sizes_inside_range(range, rank_, max_size);\n\n            if (first_par_extend_step < 0 || step < first_par_extend_step) {\n                if (step == last_first_seq_split_step) {\n                    sizes.insert(sizes.begin(), max_size);\n                } else {\n                    sizes[0] = std::max(sizes[0], (size_t)max_size);\n                }\n            }\n        }\n    } else {\n        int div = strategy_->divisor(step);\n        int divm = strategy_->divisor_m(step);\n        int divn = strategy_->divisor_n(step);\n        int divk = strategy_->divisor_k(step);\n        // processor subinterval which the current rank belongs to\n        int partition_idx = P.subinterval_index(div, rank);\n        Interval newP = P.subinterval(div, partition_idx);\n        // intervals of M, N and K that the current rank is in charge of,\n        // together with other ranks from its group.\n        // (see the definition of group and offset below)\n        Interval newm = m.subinterval(divm, divm > 1 ? 
partition_idx : 0);\n        Interval newn = n.subinterval(divn, divn > 1 ? partition_idx : 0);\n        Interval newk = k.subinterval(divk, divk > 1 ? partition_idx : 0);\n\n        int offset = rank - newP.first();\n\n        std::vector<std::vector<int>> size_before_expansion(P.length());\n        std::vector<int> total_before_expansion(P.length());\n        std::vector<std::vector<int>> size_after_expansion(newP.length());\n        std::vector<int> total_after_expansion(newP.length());\n\n        size_t max_size = -1;\n\n        bool expanded = !strategy_->split(label_, step);\n\n        if (expanded) {\n            /*\n             * this gives us the 2D interval of the matrix that will be\n             expanded: if divm > 1 => matrix B expanded => Interval2D(k, n) if\n             divn > 1 => matrix A expanded => Interval2D(m, k) if divk > 1 =>\n             matrix C expanded => Interval2D(m, n)\n            */\n            Interval2D range;\n\n            if (divm > 1)\n                range = Interval2D(k, n);\n            else if (divn > 1)\n                range = Interval2D(m, k);\n            else\n                range = Interval2D(m, n);\n\n            layout_->buffers_before_expansion(\n                P, range, size_before_expansion, total_before_expansion);\n\n            layout_->buffers_after_expansion(P,\n                                             newP,\n                                             size_before_expansion,\n                                             total_before_expansion,\n                                             size_after_expansion,\n                                             total_after_expansion);\n\n            // increase the buffer sizes before the substeps call\n            layout_->set_sizes(newP, size_after_expansion);\n\n            // this is the sum of sizes of all the buckets after expansion\n            // that the current rank will own.\n            // which is also the size of the matrix after 
expansion\n            size_t old_size = total_before_expansion[rank - P.first()];\n            size_t new_size = total_after_expansion[rank - newP.first()];\n            max_size = std::max(old_size, new_size);\n\n            int n_blocks = size_before_expansion[rank - P.first()].size();\n\n            if (n_blocks > 1) {\n                max_reshuffle_buffer_size_ =\n                    std::max(max_reshuffle_buffer_size_, new_size);\n            }\n\n            // if C was expanded, then reduce was invoked\n            if (label_ == 'C' && beta != scalar_t{0}) {\n                int subint_index, subint_offset;\n                std::tie(subint_index, subint_offset) =\n                    P.locate_in_subinterval(div, rank);\n                int target =\n                    P.locate_in_interval(div, subint_index, subint_offset);\n                max_reduce_buffer_size_ =\n                    std::max(max_reduce_buffer_size_,\n                             (size_t)total_before_expansion[target]);\n            }\n        }\n\n        // if division by k, and we are in the branch where beta > 0, then\n        // reset beta to 0, but keep in mind that on the way back from substeps\n        // we will have to sum the result with the local data in C\n        // this is necessary since reduction happens AFTER the substeps\n        // so we cannot pass beta = 1 if the data is not present there BEFORE\n        // the substeps.\n        scalar_t new_beta = beta;\n        if (strategy_->split_k(step) && beta != scalar_t{0}) {\n            new_beta = scalar_t{0};\n        }\n\n        // invoke the substeps\n        std::vector<size_t> subsizes = compute_buffer_size(\n            newm, newn, newk, newP, step + 1, rank, new_beta);\n\n        if (expanded) {\n            sizes = std::vector<size_t>(subsizes.size() + 1);\n            sizes[0] = max_size;\n            std::copy(subsizes.begin(), subsizes.end(), sizes.begin() + 1);\n            // the buffer sizes are back to the 
previous values\n            // (the values at the beginning of this parallel step)\n            layout_->set_sizes(\n                newP, size_before_expansion, newP.first() - P.first());\n        } else {\n            sizes = subsizes;\n        }\n    }\n\n    // unshift(offset);\n    layout_->set_seq_buckets(P, buckets);\n    return sizes;\n}\n\ntemplate <typename T>\nvoid Buffer<T>::compute_max_buffer_size(Interval &m,\n                                        Interval &n,\n                                        Interval &k,\n                                        Interval &P,\n                                        int step,\n                                        int rank,\n                                        scalar_t beta) {\n    // current submatrices that are being computed\n    Interval2D a_range(m, k);\n    Interval2D b_range(k, n);\n    Interval2D c_range(m, n);\n\n    // For each of P processors remember which sequential bucket we are\n    // currently on\n    std::vector<int> buckets = layout_->seq_buckets(P);\n    // Skip all buckets that are \"before\" the current submatrices.\n    // the relation submatrix1 <before> submatrix2 is defined in Interval2D.\n    // Intuitively, this will skip all the buckets that are \"above\" or \"on the\n    // left\" of the current submatrices. We say \"before\" because whenever we\n    // split sequentially, we always first start with the \"above\" submatrix (if\n    // the splitting is horizontal) or with the left one (if the splitting is\n    // vertical). 
which explains the name of the relation \"before\".\n    if (label_ == 'A') {\n        layout_->update_buckets(P, a_range);\n    } else if (label_ == 'B') {\n        layout_->update_buckets(P, b_range);\n    } else {\n        layout_->update_buckets(P, c_range);\n    }\n\n    // int offset = shift(buckets[rank - P.first()]);\n\n    // invoke a parallel or a sequential step:\n    if (strategy_->final_step(step)) {\n        size_t max_size = 0;\n        if (label_ == 'A') {\n            max_size = 1LL * m.length() * k.length();\n        } else if (label_ == 'B') {\n            max_size = 1LL * k.length() * n.length();\n        } else {\n            max_size = 1LL * m.length() * n.length();\n        }\n\n        max_base_buffer_size_ = std::max(max_base_buffer_size_, max_size);\n\n        if (max_size > max_recv_buffer_size_) {\n            max_send_buffer_size_ = max_recv_buffer_size_;\n            max_recv_buffer_size_ = max_size;\n        } else if (max_size > max_send_buffer_size_) {\n            max_send_buffer_size_ = max_size;\n        }\n    } else if (strategy_->sequential_step(step)) {\n        int div = strategy_->divisor(step);\n        int divm = strategy_->divisor_m(step);\n        int divn = strategy_->divisor_n(step);\n        int divk = strategy_->divisor_k(step);\n\n        for (int i = 0; i < div; ++i) {\n            Interval newm = m.subinterval(divm, divm > 1 ? i : 0);\n            Interval newn = n.subinterval(divn, divn > 1 ? i : 0);\n            Interval newk = k.subinterval(divk, divk > 1 ? 
i : 0);\n\n            // update beta value\n            scalar_t new_beta = beta;\n            if (label_ == 'C' && divk > 1) {\n                if (i != 0) {\n                    new_beta = scalar_t{1};\n                }\n            }\n\n            compute_max_buffer_size(\n                newm, newn, newk, P, step + 1, rank, new_beta);\n\n            // if dividing over absent dimension, then all the branches are the\n            // same so skip the rest\n            if ((label_ == 'A' && !strategy_->split_A(step)) ||\n                (label_ == 'B' && !strategy_->split_B(step)) ||\n                (label_ == 'C' && !strategy_->split_C(step))) {\n                break;\n            }\n        }\n    } else {\n        int div = strategy_->divisor(step);\n        int divm = strategy_->divisor_m(step);\n        int divn = strategy_->divisor_n(step);\n        int divk = strategy_->divisor_k(step);\n        // processor subinterval which the current rank belongs to\n        int partition_idx = P.subinterval_index(div, rank);\n        Interval newP = P.subinterval(div, partition_idx);\n        // intervals of M, N and K that the current rank is in charge of,\n        // together with other ranks from its group.\n        // (see the definition of group and offset below)\n        Interval newm = m.subinterval(divm, divm > 1 ? partition_idx : 0);\n        Interval newn = n.subinterval(divn, divn > 1 ? partition_idx : 0);\n        Interval newk = k.subinterval(divk, divk > 1 ? 
partition_idx : 0);\n\n        int offset = rank - newP.first();\n\n        std::vector<std::vector<int>> size_before_expansion(P.length());\n        std::vector<int> total_before_expansion(P.length());\n        std::vector<std::vector<int>> size_after_expansion(newP.length());\n        std::vector<int> total_after_expansion(newP.length());\n\n        bool expanded = (label_ == 'A' && !strategy_->split_A(step)) ||\n                        (label_ == 'B' && !strategy_->split_B(step)) ||\n                        (label_ == 'C' && !strategy_->split_C(step));\n\n        if (expanded) {\n            /*\n             * this gives us the 2D interval of the matrix that will be\n             expanded: if divm > 1 => matrix B expanded => Interval2D(k, n) if\n             divn > 1 => matrix A expanded => Interval2D(m, k) if divk > 1 =>\n             matrix C expanded => Interval2D(m, n)\n            */\n            Interval2D range;\n\n            if (divm > 1)\n                range = Interval2D(k, n);\n            else if (divn > 1)\n                range = Interval2D(m, k);\n            else\n                range = Interval2D(m, n);\n\n            layout_->buffers_before_expansion(\n                P, range, size_before_expansion, total_before_expansion);\n\n            layout_->buffers_after_expansion(P,\n                                             newP,\n                                             size_before_expansion,\n                                             total_before_expansion,\n                                             size_after_expansion,\n                                             total_after_expansion);\n\n            // increase the buffer sizes before the substeps\n            layout_->set_sizes(newP, size_after_expansion);\n\n            // this is the sum of sizes of all the buckets after expansion\n            // that the current rank will own.\n            // which is also the size of the matrix after expansion\n            size_t old_size = 
total_before_expansion[rank - P.first()];\n            size_t new_size = total_after_expansion[rank - newP.first()];\n            size_t max_size = std::max(old_size, new_size);\n            if (max_size > max_recv_buffer_size_) {\n                max_send_buffer_size_ = max_recv_buffer_size_;\n                max_recv_buffer_size_ = max_size;\n            } else if (max_size > max_send_buffer_size_) {\n                max_send_buffer_size_ = max_size;\n            }\n\n            int n_blocks = size_before_expansion[rank - P.first()].size();\n\n            if (n_blocks > 1) {\n                max_reshuffle_buffer_size_ =\n                    std::max(max_reshuffle_buffer_size_, new_size);\n            }\n\n            // if C was expanded, then reduce was invoked\n            if (label_ == 'C') {\n                int subint_index, subint_offset;\n                std::tie(subint_index, subint_offset) =\n                    P.locate_in_subinterval(div, rank);\n                int target =\n                    P.locate_in_interval(div, subint_index, subint_offset);\n                max_reduce_buffer_size_ =\n                    std::max(max_reduce_buffer_size_,\n                             (size_t)total_before_expansion[target]);\n                // std::cout << \"max_reduce_buffer_size = \" <<\n                // max_reduce_buffer_size_ << std::endl;\n            }\n        }\n\n        // if division by k, and we are in the branch where beta > 0, then\n        // reset beta to 0, but keep in mind that on the way back from the\n        // substeps we will have to sum the result with the local data in C this\n        // is necessary since reduction happens AFTER the substeps so we cannot\n        // pass beta = 1 if the data is not present there BEFORE the substeps\n        scalar_t new_beta = beta;\n        if (strategy_->split_k(step) && beta != scalar_t{0}) {\n            new_beta = 0;\n        }\n\n        // invoke the substeps\n        
compute_max_buffer_size(\n            newm, newn, newk, newP, step + 1, rank, new_beta);\n\n        if (expanded) {\n            // the buffer sizes are back to the previous values\n            // (the values at the beginning of this parallel step)\n            layout_->set_sizes(\n                newP, size_before_expansion, newP.first() - P.first());\n        }\n    }\n\n    // unshift(offset);\n    layout_->set_seq_buckets(P, buckets);\n}\n\ntemplate <typename T>\nT* Buffer<T>::operator[](const size_t index) {\n    return ctxt_->get_memory_pool().get_buffer_pointer(buffers_[index]);\n}\n\ntemplate <typename T>\nT* Buffer<T>::operator[](const size_t index) const {\n    return ctxt_->get_memory_pool().get_buffer_pointer(buffers_[index]);\n}\n\ntemplate <typename T>\nsize_t Buffer<T>::max_send_buffer_size() const {\n    return max_send_buffer_size_;\n}\n\ntemplate <typename T>\nsize_t Buffer<T>::max_recv_buffer_size() const {\n    return max_recv_buffer_size_;\n}\n\n// Explicit instantiations\n//\ntemplate class Buffer<double>;\ntemplate class Buffer<std::complex<double>>;\ntemplate class Buffer<float>;\ntemplate class Buffer<std::complex<float>>;\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/buffer.hpp",
    "content": "#pragma once\n#include <cosma/interval.hpp>\n#include <cosma/layout.hpp>\n#include <cosma/mapper.hpp>\n#include <cosma/strategy.hpp>\n#include <cosma/context.hpp>\n\n#ifdef COSMA_HAVE_GPU\n#include <Tiled-MM/util.hpp>\n#endif\n\n#include <vector>\n\n/*\n * This class wrapps up a vector of buffers representing single matrix (A, B or\n * C). During the algorithm, a new buffer is allocated in each parallel step in\n * which this matrix was expanded (i.e. not split). However, here we also use\n * some optimization and as soon as the number of blocks of this matrix that the\n * current rank owns reaches 1 (meaning that there are no sequential steps after\n * that step), no new allocations occur, but the sending and receiving buffers\n * keep swapping. This is possible because if there are no sequential steps\n * afterwards then after the communication matrix is expanded and the receiving\n * buffer owns everything that a sending buffer owns plus the same pieces from\n * other ranks. Therefore, after communication, we don't need the sending buffer\n * and we can reuse it to be the next receiving buffer (i.e. 
we can keep\n * swapping sending and receiving buffers in each parallel step in which this\n * matrix was expanded as long as there are no sequential steps left.\n */\n\nnamespace cosma {\n\ntemplate <typename Scalar>\nclass Buffer {\npublic:\n    using scalar_t = Scalar;\n\n    // Buffer() = default;\n    Buffer();\n\n    // with cosma_context*\n    Buffer(cosma_context<Scalar>* ctxt,\n           Mapper *mapper,\n           Layout *layout,\n           bool dry_run = false);\n\n    // without context (using global singleton context)\n    Buffer(Mapper *mapper,\n           Layout *layout,\n           bool dry_run = false);\n\n    ~Buffer();\n\n    Buffer &operator=(Buffer &) = delete;\n    Buffer &operator=(Buffer &&) = default;\n\n    // allocates all the buffers that are needed for the current matrix and the\n    // current rank\n    void allocate_initial_buffers(bool dry_run = false);\n    void allocate_communication_buffers(bool dry_run = false);\n\n    void free_initial_buffers(bool dry_run = false);\n    void free_communication_buffers(bool dry_run = false);\n\n    // get an array of all sizes that this matrix will require\n    std::vector<size_t> get_all_buffer_sizes();\n\n    // increases the index of the current buffer\n    void advance_buffer();\n    // returns the index of the current buffer\n    int buffer_index();\n    // sets the index of the current buffer to idx\n    void set_buffer_index(int idx);\n    // swaps ids of current_buffer and reduce buffer\n    void swap_reduce_buffer_with(size_t buffer_idx);\n\n    // returns the pointer to the current buffer\n    scalar_t* buffer_ptr();\n    const scalar_t* buffer_ptr() const;\n    const size_t buffer_size() const;\n\n    // pointer to the reshuffle buffer used when n_blocks > 1\n    scalar_t *reshuffle_buffer_ptr();\n    // pointer to the parallel-reduce buffer used when beta > 0\n    scalar_t *reduce_buffer_ptr();\n    // returns index of a buffer that is used in gemm\n    // it can be either last or 
pre-last buffer\n    // depending on the parity of #parallel steps\n    // after the last sequential step.\n    // (since only last two buffers keep swapping).\n    int buff_index_before_gemm() const;\n\n    // returns the initial buffer (i.e. with index 0)\n    // this buffer owns the initial matrix data\n    scalar_t* initial_buffer_ptr();\n    const scalar_t* initial_buffer_ptr() const;\n    const size_t initial_buffer_size() const;\n\n    // we can access i-th buffer of this class with [] operator\n    scalar_t* operator[](const size_t index);\n    scalar_t* operator[](const size_t index) const;\n\n    // can be A, B or C, determining the matrix\n    char label_;\n    // the strategy is owned by the matrix object. here only the pointer to\n    // avoid the creation of the strategy again.\n    const Strategy *strategy_;\n    // current rank\n    int rank_;\n\n    // used to get the size of the initial buffer\n    Mapper *mapper_;\n    // used to get the sizes of buffers needed in each step\n    Layout *layout_;\n\nprotected:\n    // computes the buffer sizes that is needed for this matrix (where\n    // label_=\"A\", \"B\" or \"C\"); the length of this vector is the number of\n    // different buffers that is needed.\n    std::vector<size_t> compute_buffer_size();\n    std::vector<size_t> compute_buffer_size(Interval &m,\n                                               Interval &n,\n                                               Interval &k,\n                                               Interval &P,\n                                               int step,\n                                               int rank,\n                                               scalar_t beta);\n\n    // when the number of blocks that the current rank owns from this matrix\n    // reaches 1 (meaning that there are no sequential steps left) then no new\n    // buffers are allocated. 
From this moment, this function is invoked and it\n    // follows the tree of execution of the algorithm and finds the largest two\n    // buffers for this matrix that are needed. These two buffers will then be\n    // reused and swapped in the whole subtree of the execution.\n    void compute_max_buffer_size(Interval &m,\n                                 Interval &n,\n                                 Interval &k,\n                                 Interval &P,\n                                 int step,\n                                 int rank,\n                                 scalar_t beta);\n\n    // initializes two arrays:\n    // 1. n_buckets_ : this vectors gives us for each step of the algorithm the\n    // number of\n    //     different blocks that the current ranks owns from the current matrix.\n    //     The number of blocks in step i is equal to the product of divisors in\n    //     all sequential steps j > i (thus excluding step i) in which the\n    //     current matrix was split.\n\n    // 2. expanded_after_ : for each step i of the algorithm returns true/false\n    // showing whether\n    //     the current matrix was expanded in some of the following parallel\n    //     steps (including the i-th step). 
The matrix is expanded in a parallel\n    //     step if it is NOT split in that step.\n    void compute_n_buckets();\n\n\n    cosma_context<Scalar>* ctxt_;\n\n    // computes the number of buckets in the current step\n    // the number of buckets in some step i is equal to the\n    // product of all divisors in sequential steps that follow step i\n    // in which the current matrix was divided\n    std::vector<int> n_buckets_;\n    std::vector<bool> expanded_after_;\n\n    // vector of buffers being used for the current matrix (given by label)\n    // by the current rank (determined by variable rank_)\n    std::vector<size_t> buffers_;\n    std::vector<size_t> buff_sizes_;\n    // temporary buffer used for reshuffling of data received from other ranks\n    // this happens when sequential steps are present, i.e. when n_blocks > 1\n    size_t reshuffle_buffer_;\n    // temporary buffer used in parallel-reduce step (two-sided communication)\n    // used when beta > 0 (to save current C)\n    size_t reduce_buffer_;\n    // pointer to the current buffer being used in the previous vector of\n    // buffers\n    int current_buffer_ = 0;\n\n    // buffer used in sequential steps for reshuffling\n    size_t max_reshuffle_buffer_size_ = 0;\n    // buffer used in parallel reduce step, when beta == 1\n    size_t max_reduce_buffer_size_ = 0;\n\n    // computed by compute_max_buffer_size function. 
represent the two largest\n    // buffer sizes (max_recv_buffer_size >= max_send_buffer_size);\n    size_t max_send_buffer_size_ = 0;\n    size_t max_recv_buffer_size_ = 0;\n    size_t max_par_block_size_ = 0;\n    // max size of the matrix in the base case (among all base cases)\n    size_t max_base_buffer_size_ = 0;\n    size_t max_send_buffer_size() const;\n    size_t max_recv_buffer_size() const;\n\n    void init_first_split_steps();\n    // first seq step that splits the current matrix\n    int first_seq_split_step;\n    int last_first_seq_split_step;\n    // first parallel step that does expands (i.e. does not split) the current\n    // matrix\n    int first_par_extend_step;\n\n    // if true, memory already pinned\n    bool pinned_ = false;\n};\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/cinterface.cpp",
    "content": "#include <costa/grid2grid/grid2D.hpp>\n\n#include <cosma/cinterface.hpp>\n#include <cosma/multiply.hpp>\n\n#include <mpi.h>\n\nnamespace cosma {\n\ntemplate <class T>\ncosta::grid_layout<T> grid_from_clayout(int n_ranks,\n                                            const ::layout *layout) {\n\n    // Create the local blocks\n    std::vector<costa::block<T>> loc_blks;\n\n    // Create blocks\n    for (int i = 0; i < layout->nlocalblocks; ++i) {\n        auto &block = layout->localblocks[i];\n        auto row = block.row;\n        auto col = block.col;\n        auto ptr = reinterpret_cast<T *>(block.data);\n        auto stride = block.ld;\n\n        costa::block_coordinates coord{row, col};\n        costa::interval rows{layout->rowsplit[row],\n                             layout->rowsplit[row + 1]};\n        costa::interval cols{layout->colsplit[col],\n                                 layout->colsplit[col + 1]};\n        loc_blks.emplace_back(rows, cols, coord, ptr, stride);\n    }\n\n    // Grid specification\n    std::vector<int> rows_split(layout->rowblocks + 1);\n    std::copy_n(layout->rowsplit, rows_split.size(), rows_split.begin());\n\n    std::vector<int> cols_split(layout->colblocks + 1);\n    std::copy_n(layout->colsplit, cols_split.size(), cols_split.begin());\n\n    std::vector<std::vector<int>> owners_matrix(layout->rowblocks);\n    for (int i = 0; i < layout->rowblocks; ++i) {\n        owners_matrix[i].resize(layout->colblocks);\n        for (int j = 0; j < layout->colblocks; ++j)\n            owners_matrix[i][j] = layout->owners[i * layout->colblocks + j];\n    }\n\n    return {{{std::move(rows_split), std::move(cols_split)},\n             std::move(owners_matrix),\n             n_ranks},\n            {std::move(loc_blks)},\n            'C'};\n}\n\ntemplate <class T>\nvoid xmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            
const T *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const T *beta,\n                            const layout *layout_c) {\n\n    // communicator size and rank\n    int rank, P;\n    MPI_Comm_size(comm, &P);\n    MPI_Comm_rank(comm, &rank);\n\n    auto cosma_layout_a = grid_from_clayout<T>(P, layout_a);\n    auto cosma_layout_b = grid_from_clayout<T>(P, layout_b);\n    auto cosma_layout_c = grid_from_clayout<T>(P, layout_c);\n\n    // perform cosma multiplication\n    cosma::multiply_using_layout<T>(\n        cosma_layout_a, cosma_layout_b, cosma_layout_c, *alpha, *beta, *transa, *transb, comm);\n}\n} // namespace cosma\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid smultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const float *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const float *beta,\n                            const layout *layout_c) {\n\n    cosma::xmultiply_using_layout<float>(\n        comm, transa, transb, alpha, layout_a, layout_b, beta, layout_c);\n}\n\nvoid dmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const double *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const double *beta,\n                            const layout *layout_c) {\n\n    cosma::xmultiply_using_layout<double>(\n        comm, transa, transb, alpha, layout_a, layout_b, beta, layout_c);\n}\n\nvoid cmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            
const float *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const float *beta,\n                            const layout *layout_c) {\n\n    cosma::xmultiply_using_layout<std::complex<float>>(\n        comm,\n        transa,\n        transb,\n        reinterpret_cast<const std::complex<float> *>(alpha),\n        layout_a,\n        layout_b,\n        reinterpret_cast<const std::complex<float> *>(beta),\n        layout_c);\n}\n\nvoid zmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const double *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const double *beta,\n                            const layout *layout_c) {\n\n    cosma::xmultiply_using_layout<std::complex<double>>(\n        comm,\n        transa,\n        transb,\n        reinterpret_cast<const std::complex<double> *>(alpha),\n        layout_a,\n        layout_b,\n        reinterpret_cast<const std::complex<double> *>(beta),\n        layout_c);\n}\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/cosma/cinterface.hpp",
    "content": "#pragma once\n\n#include <mpi.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/**\n * A local block of the matrix.\n * data: a pointer to the start of the local matrix A_loc\n * ld: leading dimension or distance between two columns of A_loc\n * row: the global block row index\n * col: the global block colum index\n */\nstruct block {\n    void *data;\n    const int ld;\n    const int row;\n    const int col;\n};\n\n/**\n * Description of a distributed layout of a matrix\n * rowblocks: number of gobal blocks\n * colblocks: number of gobal blocks\n * rowsplit: [rowsplit[i], rowsplit[i+1]) is range of rows of block i\n * colsplit: [colsplit[i], colsplit[i+1]) is range of columns of block i\n * nlocalblock: number of blocks owned by the current rank\n * localblcoks: an array of block descriptions of the current rank\n */\nstruct layout {\n    int rowblocks;\n    int colblocks;\n    const int *rowsplit;\n    const int *colsplit;\n    const int *owners;\n    int nlocalblocks;\n    block *localblocks;\n};\n\nvoid smultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const float *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const float *beta,\n                            const layout *layout_c);\n\nvoid dmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const double *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const double *beta,\n                            const layout *layout_c);\n\nvoid cmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                 
           const float *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const float *beta,\n                            const layout *layout_c);\n\nvoid zmultiply_using_layout(MPI_Comm comm,\n                            const char *transa,\n                            const char *transb,\n                            const double *alpha,\n                            const layout *layout_a,\n                            const layout *layout_b,\n                            const double *beta,\n                            const layout *layout_c);\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/cosma/communicator.cpp",
    "content": "#include <complex>\n\n#include <cosma/communicator.hpp>\n#include <cosma/one_sided_communicator.hpp>\n#include <cosma/two_sided_communicator.hpp>\n\n#if defined(COSMA_HAVE_GPU) && defined(COSMA_WITH_NCCL)\n#include <cosma/gpu/nccl_utils.hpp>\n#endif\n\nnamespace cosma {\nbool communicator::use_busy_waiting = true;\n\ncommunicator::communicator(const Strategy strategy, \n                           MPI_Comm comm)\n    : strategy_(strategy) {\n\n    use_busy_waiting = strategy_.use_busy_waiting;\n\n    MPI_Comm_rank(comm, &rank_);\n    // rank_ = reordered_rank(rank_);\n    MPI_Comm_size(comm, &comm_size_);\n    // check if the reordered rank belongs \n    // to this communicator\n    assert(rank_ < comm_size_);\n    using_reduced_comm_ = comm_size_ != strategy.P;\n    is_idle_ = rank_ >= strategy.P;\n\n    if (using_reduced_comm_) {\n        MPI_Group group;\n        MPI_Comm_group(comm, &group);\n        std::vector<int> exclude_ranks;\n        for (int i = strategy.P; i < comm_size_; ++i) {\n            // exclude_ranks.push_back(reordered_rank(i));\n            exclude_ranks.push_back(i);\n        }\n\n        MPI_Group reduced_group;\n\n        MPI_Group_excl(group,\n                       exclude_ranks.size(),\n                       exclude_ranks.data(),\n                       &reduced_group);\n        MPI_Comm_create_group(comm, reduced_group, 0, &full_comm_);\n\n        MPI_Group_free(&group);\n        MPI_Group_free(&reduced_group);\n    } else {\n        // this communicator has to be duplicated as it's being cached.\n        // The user might deallocate the comm as it's allocated outside of COSMA\n        // for this reason, we have to ensure that we duplicate the comm\n        // before it's cached in the COSMA context\n        MPI_Comm_dup(comm, &full_comm_);\n        // full_comm_ = comm;\n    }\n\n    if (is_idle_) {\n        return;\n    }\n\n    if (strategy_.topology) {\n        add_topology();\n    }\n\n    
create_communicators(full_comm_);\n    // split_communicators(full_comm_);\n\n    step_to_comm_index_ = std::vector<int>(strategy_.n_steps());\n    int idx = 0;\n    for (int i = 0; i < strategy_.n_steps(); ++i) {\n        step_to_comm_index_[i] = idx;\n        if (strategy_.parallel_step(i))\n            idx++;\n    }\n}\n\ncommunicator::~communicator() {\n    if (!is_idle_) {\n        free_comms();\n    }\n}\n\nbool communicator::is_idle() { return is_idle_; }\n\n// helper function for add_topology\n// every communication pair represents one edge in the topology graph\n// this functions finds all edges for the current rank\n// weight of the edge is given by the amount of communicated data\nvoid communicator::get_topology_edges(std::vector<int> &dest,\n                                      std::vector<int> &weight) {\n    int m = strategy_.m;\n    int n = strategy_.n;\n    int k = strategy_.k;\n    Interval P(0, strategy_.P - 1);\n    int n_steps = strategy_.n_steps();\n\n    for (int step = 0; step < n_steps; ++step) {\n        m /= strategy_.divisor_m(step);\n        n /= strategy_.divisor_n(step);\n        k /= strategy_.divisor_k(step);\n\n        if (strategy_.parallel_step(step)) {\n            int div = strategy_.divisor(step);\n            int partition_idx = P.subinterval_index(div, rank_);\n            Interval newP = P.subinterval(div, partition_idx);\n            int group, offset;\n            std::tie(group, offset) = group_and_offset(P, div);\n\n            for (int gp = 0; gp < div; ++gp) {\n                int neighbor =\n                    P.first() + rank_outside_ring(P, div, offset, gp);\n                if (neighbor == rank_)\n                    continue;\n                dest.push_back(neighbor);\n\n                int communication_size = 0;\n                if (strategy_.split_n(step))\n                    communication_size = m * k / newP.length();\n                else if (strategy_.split_m(step))\n                    communication_size 
= k * n / newP.length();\n                else\n                    communication_size = m * n / newP.length();\n\n                weight.push_back(communication_size);\n            }\n\n            P = newP;\n        }\n    }\n}\n\nvoid communicator::add_topology() {\n    int n_sources = 1;\n    int source[1] = {rank_};\n    std::vector<int> dest;\n    std::vector<int> weight;\n\n    get_topology_edges(dest, weight);\n\n    int n_edges = dest.size();\n    int degree = dest.size();\n    int degrees[1] = {degree};\n\n    if (n_edges >= 1) {\n        MPI_Dist_graph_create(full_comm_,\n                              n_sources,\n                              source,\n                              degrees,\n                              dest.data(),\n                              weight.data(),\n                              MPI_INFO_NULL,\n                              true,\n                              &full_comm_);\n    }\n}\n\nvoid communicator::initialize(int *argc, char ***argv) { MPI_Init(argc, argv); }\n\nint communicator::rank() { return rank_; }\n\nvoid communicator::full_barrier() { MPI_Barrier(full_comm_); }\n\nvoid communicator::barrier(int step) {\n    int comm_index = step_to_comm_index_[step];\n    MPI_Barrier(comm_ring_[comm_index]);\n}\n\nMPI_Comm communicator::full_comm() {\n    return full_comm_;\n}\n\nMPI_Comm communicator::active_comm(int step) {\n    int comm_index = step_to_comm_index_[step];\n    return comm_ring_[comm_index];\n}\n\n#ifdef COSMA_WITH_NCCL\nncclComm_t communicator::active_nccl_comm(int step) {\n    int comm_index = step_to_comm_index_[step];\n    return nccl_comm_ring_[comm_index];\n}\n#endif\n\nint communicator::comm_size() { return comm_size_; }\n\nvoid communicator::free_comm(MPI_Comm &comm) { \n    int mpi_finalized;\n    MPI_Finalized(&mpi_finalized);\n    if (!mpi_finalized) {\n        MPI_Comm_free(&comm); \n    }\n}\n\nvoid communicator::free_group(MPI_Group &group) { MPI_Group_free(&group); }\n\nvoid 
communicator::finalize() { MPI_Finalize(); }\n\nint communicator::relative_rank(Interval &P, int r) { return r - P.first(); }\n\nint communicator::relative_rank(Interval &P) { return relative_rank(P, rank_); }\n\nint communicator::offset(Interval &P, int div, int r) {\n    return P.subinterval_offset(div, r);\n}\n\nint communicator::offset(Interval &P, int div) { return offset(P, div, rank_); }\n\nint communicator::group(Interval &P, int div, int r) {\n    return P.subinterval_index(div, r);\n}\n\nint communicator::group(Interval &P, int div) { return group(P, div, rank_); }\n\nstd::pair<int, int>\ncommunicator::group_and_offset(Interval &P, int div, int r) {\n    return P.locate_in_subinterval(div, r);\n}\n\nstd::pair<int, int> communicator::group_and_offset(Interval &P, int div) {\n    return group_and_offset(P, div, rank_);\n}\n\nint communicator::rank_inside_ring(Interval &P, int div, int global_rank) {\n    return group(P, div, global_rank);\n}\n\nint communicator::rank_inside_ring(Interval &P, int div) {\n    return rank_inside_ring(P, div, rank_);\n}\n\nint communicator::rank_outside_ring(Interval &P, int div, int off, int i) {\n    return P.locate_in_interval(div, i, off);\n}\n\nvoid communicator::split_communicators(MPI_Comm comm) {\n    // MPI_Comm_group(comm, &comm_group);\n    Interval P(0, strategy_.P - 1);\n    // iterate through all steps and for each parallel\n    // step, create a suitable subcommunicator\n    for (int step = 0; step < strategy_.n_steps(); ++step) {\n        if (strategy_.parallel_step(step)) {\n            int div = strategy_.divisor(step);\n            int partition_idx = P.subinterval_index(div, rank_);\n            Interval newP = P.subinterval(div, partition_idx);\n            int group, offset;\n            std::tie(group, offset) = group_and_offset(P, div, rank_);\n            MPI_Comm comm_ring, comm_subproblem;\n            MPI_Comm_split(comm, group, offset, &comm_subproblem);\n            MPI_Comm_split(comm, offset, 
group, &comm_ring);\n\n            comm_ring_.push_back(comm_ring);\n            comm_subproblem_.push_back(comm_subproblem);\n\n#ifdef COSMA_WITH_NCCL\n            nccl_comm_ring_.push_back(gpu::mpi_to_nccl_comm(comm_ring_.back()));\n            nccl_comm_subproblem_.push_back(gpu::mpi_to_nccl_comm(comm_subproblem_.back()));\n#endif\n\n            comm = comm_subproblem;\n            P = newP;\n        }\n    }\n}\n\nMPI_Comm create_comm(MPI_Comm& comm, std::vector<int>& ranks) {\n    MPI_Comm newcomm;\n    MPI_Group subgroup;\n\n    MPI_Group comm_group;\n    MPI_Comm_group(comm, &comm_group);\n\n    MPI_Group_incl(comm_group, ranks.size(), ranks.data(), &subgroup);\n    MPI_Comm_create_group(comm, subgroup, 0, &newcomm);\n\n    communicator::free_group(subgroup);\n    communicator::free_group(comm_group);\n\n    return newcomm;\n}\n\n\nvoid communicator::create_communicators(MPI_Comm comm) {\n    // MPI_Comm_group(comm, &comm_group);\n    Interval P(0, strategy_.P - 1);\n\n    // iterate through all steps and for each parallel\n    // step, create a suitable subcommunicator\n    for (int step = 0; step < strategy_.n_steps(); ++step) {\n        if (strategy_.parallel_step(step)) {\n            int div = strategy_.divisor(step);\n            int partition_idx = P.subinterval_index(div, rank_);\n            Interval newP = P.subinterval(div, partition_idx);\n            int group, offset;\n            std::tie(group, offset) = group_and_offset(P, div, rank_);\n\n            comm_ring_.emplace_back(create_comm_ring(comm, P, offset, div));\n            comm_subproblem_.emplace_back(create_comm_subproblem(comm, P, newP));\n\n#ifdef COSMA_WITH_NCCL\n            nccl_comm_ring_.emplace_back(gpu::mpi_to_nccl_comm(comm_ring_.back()));\n            nccl_comm_subproblem_.emplace_back(gpu::mpi_to_nccl_comm(comm_subproblem_.back()));\n#endif\n\n            comm = comm_subproblem_.back();\n            P = newP;\n        }\n    }\n}\n\nMPI_Comm 
communicator::create_comm_ring(MPI_Comm comm,\n                                       Interval &P,\n                                       int offset,\n                                       int div) {\n    std::vector<int> ranks(div);\n    for (int i = 0; i < div; ++i) {\n        ranks[i] = rank_outside_ring(P, div, offset, i);\n    }\n\n    return create_comm(comm, ranks);\n}\n\nMPI_Comm communicator::create_comm_subproblem(MPI_Comm comm,\n                                     Interval &P,\n                                     Interval &newP) {\n    MPI_Comm newcomm;\n    MPI_Group subgroup;\n\n    MPI_Group comm_group;\n    MPI_Comm_group(comm, &comm_group);\n\n    std::vector<int> ranks(newP.length());\n    for (int i = 0; i < ranks.size(); ++i) {\n        ranks[i] = relative_rank(P, newP.first() + i);\n    }\n\n    MPI_Group_incl(comm_group, ranks.size(), ranks.data(), &subgroup);\n    MPI_Comm_create(comm, subgroup, &newcomm);\n\n    free_group(subgroup);\n    free_group(comm_group);\n\n    return newcomm;\n}\n\nvoid communicator::free_comms() {\n    for (int i = comm_subproblem_.size() - 1; i >= 0; --i) {\n        free_comm(comm_subproblem_[i]);\n#ifdef COSMA_WITH_NCCL\n        gpu::free_nccl_comm(nccl_comm_subproblem_[i]);\n#endif\n    }\n    for (int i = comm_ring_.size() - 1; i >= 0; --i) {\n        free_comm(comm_ring_[i]);\n#ifdef COSMA_WITH_NCCL\n        gpu::free_nccl_comm(nccl_comm_ring_[i]);\n#endif\n    }\n    // if (using_reduced_comm_) {\n    free_comm(full_comm_);\n    full_comm_ = MPI_COMM_NULL;\n    //}\n}\n\ntemplate <typename Scalar>\nvoid communicator::copy(Interval &P,\n                        Scalar *in,\n                        Scalar *out,\n                        Scalar *reshuffle_buffer,\n                        std::vector<std::vector<int>> &size_before,\n                        std::vector<int> &total_before,\n                        int total_after,\n                        int step) {\n    MPI_Comm comm = active_comm(step);\n    
two_sided_communicator::copy(comm,\n                                 rank(),\n                                 strategy_.divisor(step),\n                                 P,\n                                 in,\n                                 out,\n                                 reshuffle_buffer,\n                                 size_before,\n                                 total_before,\n                                 total_after);\n}\n\ntemplate <typename Scalar>\nvoid communicator::reduce(Interval &P,\n                          Scalar *in,\n                          Scalar *out,\n                          Scalar *reshuffle_buffer,\n                          Scalar *reduce_buffer,\n                          std::vector<std::vector<int>> &c_current,\n                          std::vector<int> &c_total_current,\n                          std::vector<std::vector<int>> &c_expanded,\n                          std::vector<int> &c_total_expanded,\n                          Scalar alpha,\n                          Scalar beta,\n                          int step) {\n    MPI_Comm comm = active_comm(step);\n    two_sided_communicator::reduce(comm,\n                                   rank(),\n                                   strategy_.divisor(step),\n                                   P,\n                                   in,  // LC\n                                   out, // C\n                                   reshuffle_buffer,\n                                   reduce_buffer,\n                                   c_current,\n                                   c_total_current,\n                                   c_expanded,\n                                   c_total_expanded,\n                                   beta);\n}\n\ntemplate <typename Scalar>\nvoid communicator::overlap_comm_and_comp(cosma_context<Scalar> *ctx,\n                                         CosmaMatrix<Scalar> &matrixA,\n                                         CosmaMatrix<Scalar> 
&matrixB,\n                                         CosmaMatrix<Scalar> &matrixC,\n                                         Interval &m,\n                                         Interval &n,\n                                         Interval &k,\n                                         Interval &P,\n                                         size_t step,\n                                         Scalar alpha,\n                                         Scalar beta) {\n    MPI_Comm comm = active_comm(step);\n    one_sided_communicator::overlap_comm_and_comp(ctx,\n                                                  comm,\n                                                  rank(),\n                                                  strategy_,\n                                                  matrixA,\n                                                  matrixB,\n                                                  matrixC,\n                                                  m,\n                                                  n,\n                                                  k,\n                                                  P,\n                                                  step,\n                                                  alpha,\n                                                  beta);\n}\n\nconst Strategy communicator::get_strategy() {\n    return strategy_;\n}\n\n// Explicit instantiations for `copy`\n//\ntemplate void\ncommunicator::copy<double>(Interval &P,\n                           double *in,\n                           double *out,\n                           double *reshuffle_buffer,\n                           std::vector<std::vector<int>> &size_before,\n                           std::vector<int> &total_before,\n                           int total_after,\n                           int step);\n\ntemplate void\ncommunicator::copy<float>(Interval &P,\n                          float *in,\n                          float *out,\n                      
    float *reshuffle_buffer,\n                          std::vector<std::vector<int>> &size_before,\n                          std::vector<int> &total_before,\n                          int total_after,\n                          int step);\n\ntemplate void communicator::copy<std::complex<float>>(\n    Interval &P,\n    std::complex<float> *in,\n    std::complex<float> *out,\n    std::complex<float> *reshuffle_buffer,\n    std::vector<std::vector<int>> &size_before,\n    std::vector<int> &total_before,\n    int total_after,\n    int step);\n\ntemplate void communicator::copy<std::complex<double>>(\n    Interval &P,\n    std::complex<double> *in,\n    std::complex<double> *out,\n    std::complex<double> *reshuffle_buffer,\n    std::vector<std::vector<int>> &size_before,\n    std::vector<int> &total_before,\n    int total_after,\n    int step);\n\n// Explicit instantiations for `reduce`\n//\ntemplate void\ncommunicator::reduce<float>(Interval &P,\n                            float *in,\n                            float *out,\n                            float *reshuffle_buffer,\n                            float *reduce_buffer,\n                            std::vector<std::vector<int>> &c_current,\n                            std::vector<int> &c_total_current,\n                            std::vector<std::vector<int>> &c_expanded,\n                            std::vector<int> &c_total_expanded,\n                            float alpha,\n                            float beta,\n                            int step);\n\ntemplate void\ncommunicator::reduce<double>(Interval &P,\n                             double *in,\n                             double *out,\n                             double *reshuffle_buffer,\n                             double *reduce_buffer,\n                             std::vector<std::vector<int>> &c_current,\n                             std::vector<int> &c_total_current,\n                             std::vector<std::vector<int>> 
&c_expanded,\n                             std::vector<int> &c_total_expanded,\n                             double alpha,\n                             double beta,\n                             int step);\n\ntemplate void communicator::reduce<std::complex<float>>(\n    Interval &P,\n    std::complex<float> *in,\n    std::complex<float> *out,\n    std::complex<float> *reshuffle_buffer,\n    std::complex<float> *reduce_buffer,\n    std::vector<std::vector<int>> &c_current,\n    std::vector<int> &c_total_current,\n    std::vector<std::vector<int>> &c_expanded,\n    std::vector<int> &c_total_expanded,\n    std::complex<float> alpha,\n    std::complex<float> beta,\n    int step);\n\ntemplate void communicator::reduce<std::complex<double>>(\n    Interval &P,\n    std::complex<double> *in,\n    std::complex<double> *out,\n    std::complex<double> *reshuffle_buffer,\n    std::complex<double> *reduce_buffer,\n    std::vector<std::vector<int>> &c_current,\n    std::vector<int> &c_total_current,\n    std::vector<std::vector<int>> &c_expanded,\n    std::vector<int> &c_total_expanded,\n    std::complex<double> alpha,\n    std::complex<double> beta,\n    int step);\n\n// Explicit instantiations for `overlap_comm_and_comp`\n//\ntemplate void\ncommunicator::overlap_comm_and_comp<double>(cosma_context<double> *ctx,\n                                            CosmaMatrix<double> &matrixA,\n                                            CosmaMatrix<double> &matrixB,\n                                            CosmaMatrix<double> &matrixC,\n                                            Interval &m,\n                                            Interval &n,\n                                            Interval &k,\n                                            Interval &P,\n                                            size_t step,\n                                            double alpha,\n                                            double beta);\ntemplate 
void\ncommunicator::overlap_comm_and_comp<float>(cosma_context<float> *ctx,\n                                           CosmaMatrix<float> &matrixA,\n                                           CosmaMatrix<float> &matrixB,\n                                           CosmaMatrix<float> &matrixC,\n                                           Interval &m,\n                                           Interval &n,\n                                           Interval &k,\n                                           Interval &P,\n                                           size_t step,\n                                           float alpha,\n                                           float beta);\n\ntemplate void communicator::overlap_comm_and_comp<std::complex<float>>(\n    cosma_context<std::complex<float>> *ctx,\n    CosmaMatrix<std::complex<float>> &matrixA,\n    CosmaMatrix<std::complex<float>> &matrixB,\n    CosmaMatrix<std::complex<float>> &matrixC,\n    Interval &m,\n    Interval &n,\n    Interval &k,\n    Interval &P,\n    size_t step,\n    std::complex<float> alpha,\n    std::complex<float> beta);\n\ntemplate void communicator::overlap_comm_and_comp<std::complex<double>>(\n    cosma_context<std::complex<double>> *ctx,\n    CosmaMatrix<std::complex<double>> &matrixA,\n    CosmaMatrix<std::complex<double>> &matrixB,\n    CosmaMatrix<std::complex<double>> &matrixC,\n    Interval &m,\n    Interval &n,\n    Interval &k,\n    Interval &P,\n    size_t step,\n    std::complex<double> alpha,\n    std::complex<double> beta);\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/communicator.hpp",
    "content": "#pragma once\n\n#include <algorithm>\n#include <iostream>\n#include <mpi.h>\n#include <stdlib.h>\n#include <tuple>\n\n#include <cosma/interval.hpp>\n#include <cosma/matrix.hpp>\n#include <cosma/strategy.hpp>\n#include <cosma/context.hpp>\n\n#if defined(COSMA_WITH_NCCL) && defined(TILED_MM_CUDA)\n#include <nccl.h>\n#endif\n\n#if defined(COSMA_WITH_NCCL) && defined(TILED_MM_ROCM)\n#include <rccl/rccl.h>\n#endif\n\nnamespace cosma {\n\n// forward-declaration\n// template <typename T>\n// class cosma_context;\n\nclass communicator {\n  public:\n    communicator() = default;\n    communicator(const Strategy strategy, MPI_Comm comm);\n    ~communicator();\n\n    /* In each communication step, processors are split and the communication is\n     * performed. P processors are split into d groups (d = divisor in this\n     * step), where each group consists of P/d processors.\n     *\n     * Communication rings are then created by taking 1 processor from each\n     * group with the same offset within that group.\n     * ------------------------------------------------------------------------------\n     * Example: P = 12, d = 3:\n     *    - 3 groups with 4 elements: [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]\n     *    - 4 communication rings: [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]\n     *\n     * For this reason, to each rank, we assign two numbers: (gp, offset)\n     * describing which group this rank belongs to and what offset within\n     * its group this rank has. 
The offset uniquely determines the\n     * communication ring that the rank belongs to.\n     * ------------------------------------------------------------------------------\n     * Example: P = 12, d = 3:\n     *    - rank 1 belongs to group 1 and has offset 0 within this group.\n     *    - rank 4 belongs to group 0 and has offset 1 within this group.\n     *    - rank 10 belongs to group 2 and has offset 2 within this group.\n     * ------------------------------------------------------------------------------\n     */\n\n    /* Performs all-gather type of communication within each communication ring.\n     *\n     * During the communication, ranks exchange the contents of \"in\" buffers.\n     * After the communication, all ranks within the same communication ring\n     * have exactly the same content in their \"out\" buffers, that contains\n     * the result of the all-gather communication. This means that after\n     * the communication, all the ranks within the same communication ring\n     * become completely independent of each other, since they have the same\n     * data regarding the communicated matrix.\n     * ------------------------------------------------------------------------------\n     * Example: P = 12, d = 3, first communication ring performs the following\n     * communication:\n     * ------------------------------------------------------------------------------\n     * BEFORE COMMUNICATION:\n     * rank 0:\n     *      buffer \"in\": a1, a2, a3 (different blocks)\n     * rank 4:\n     *      buffer \"in\": b1, b2, b3\n     * rank 8:\n     *      buffer \"in\": c1, c2, c3\n     * ------------------------------------------------------------------------------\n     * AFTER COMMUNICATION:\n     * ranks 0, 4, 8 have identical out buffer with the following content:\n     *      buffer \"out\": a1, b1, c1, a2, b2, c2, a3, b3, c3\n     * ------------------------------------------------------------------------------\n     * All ranks in the same 
communication ring have the same number of blocks,\n     * but all blocks can potentially have different sizes. The total number of\n     * blocks per rank is equal to the product of all divisors in sequential\n     * steps (only in sequential steps) in which this matrix was split. However,\n     * not all the blocks that a rank owns are necessarily exchanged in a single\n     * invocation of this function. Only blocks belonging to the current\n     * submatrix are being exchanged within a single invocation of copy.\n     */\n    template <typename Scalar>\n    void copy(Interval &P,\n              Scalar *in,\n              Scalar *out,\n              Scalar *reshuffle_buffer,\n              std::vector<std::vector<int>> &size_before,\n              std::vector<int> &total_before,\n              int total_after,\n              int step);\n\n    /* Performs reduce-scatter type of communication within each communication\n     * ring. This can be thought of as the inverse of copy, because here all ranks\n     * in the same communication ring have exactly the same number of elements\n     * in their \"in\" buffers.\n     *\n     * Each rank splits the data they have into equal number of blocks and then\n     * each rank reduces only a subset of blocks over all the ranks. Therefore,\n     * in copy the local buffers expand after the communication, whereas here\n     * the local buffers shrink because the rank wants to keep only a subset of\n     * blocks.\n     * ------------------------------------------------------------------------------\n     * Example: P = 12, d = 3, first communication ring performs the following\n     * communication:\n     * ------------------------------------------------------------------------------\n     * BEFORE COMMUNICATION:\n     * Each rank has the same structure of data: the same number of\n     * equally-sized blocks, but with possibly (almost surely) different content\n     * inside blocks. 
This data represents the partial results of the matrix C\n     * that should be reduced. Here, block a1 in rank 0 and in rank 4 can have\n     * (and probably will) different content (different partial results) but the\n     * size of the block a1 will be the same in all the ranks of the same\n     * communication ring.\n     *\n     * rank 0:\n     *      buffer \"in\": a1, b1, c1, a2, b2, c2, a3, b3, c3\n     * rank 4:\n     *      buffer \"in\": a1, b1, c1, a2, b2, c2, a3, b3, c3\n     * rank 8:\n     *      buffer \"in\": a1, b1, c1, a2, b2, c2, a3, b3, c3\n     * ------------------------------------------------------------------------------\n     * AFTER COMMUNICATION:\n     * rank 0:\n     *      buffer \"in\": a1, a2, a3 (summed over ranks)\n     * rank 4:\n     *      buffer \"in\": b1, b2, b3 (summed over ranks)\n     * rank 8:\n     *      buffer \"in\": c1, c2, c3 (summed over ranks)\n     *\n     * where each block after the communication (say block a1) is actually the\n     * sum of all a1-blocks in all the ranks of this communication ring\n     * ------------------------------------------------------------------------------\n     * All ranks in the same communication ring have the same number of blocks,\n     * but all blocks can potentially have different sizes. The total number of\n     * blocks per rank is equal to the product of all divisors in sequential\n     * steps (only in sequential steps) in which this matrix was split. However,\n     * not all the blocks that a rank owns are necessarily exchanged in a single\n     * invocation of this function. 
Only blocks belonging to the current\n     * submatrix are being exchanged within a single invocation of reduce.\n     */\n    template <typename Scalar>\n    void reduce(Interval &P,\n                Scalar *in,\n                Scalar *out,\n                Scalar *reshuffle_buffer,\n                Scalar *reduce_buffer,\n                std::vector<std::vector<int>> &c_current,\n                std::vector<int> &c_total_current,\n                std::vector<std::vector<int>> &c_expanded,\n                std::vector<int> &c_total_expanded,\n                Scalar alpha,\n                Scalar beta,\n                int step);\n\n    template <typename Scalar>\n    void overlap_comm_and_comp(cosma_context<Scalar> *ctx,\n                               CosmaMatrix<Scalar> &matrixA,\n                               CosmaMatrix<Scalar> &matrixB,\n                               CosmaMatrix<Scalar> &matrixC,\n                               Interval &m,\n                               Interval &n,\n                               Interval &k,\n                               Interval &P,\n                               size_t step,\n                               Scalar alpha,\n                               Scalar beta);\n\n    // creates the graph that represents the topology of mpi communicator\n    // it is \"aware\" of all the communications that will happen throughout\n    void add_topology();\n\n    static bool use_busy_waiting;\n\n    // invokes MPI_Init\n    static void initialize(int *argc, char ***argv);\n\n    // rank in the initial communicator\n    int rank();\n    int relative_rank(Interval &P);\n    int offset(Interval &P, int div);\n    int group(Interval &P, int div);\n    std::pair<int, int> group_and_offset(Interval &P, int div);\n    int rank_inside_ring(Interval &P, int div);\n\n    const Strategy *strategy();\n\n    // barrier over all the ranks taking part in the multiplication\n    void full_barrier();\n    // barrier over the active communicator 
in step\n    void barrier(int step);\n\n    // communicator active in step\n    MPI_Comm active_comm(int step);\n#ifdef COSMA_WITH_NCCL\n    // nccl communicator active in step\n    ncclComm_t active_nccl_comm(int step);\n#endif\n    MPI_Comm full_comm();\n\n    // size of the initial communicator\n    int comm_size();\n\n    // true if this rank is not taking part in the multiplication\n    // this might happen if the total number of ranks is e.g. prime\n    // or does not yield a convenient processor decomposition\n    bool is_idle();\n\n    // wrappers around MPI_Comm_free and MPI_Group_free\n    static void free_comm(MPI_Comm &comm);\n    static void free_group(MPI_Group &comm_group);\n\n    // wrapper around MPI_Finalize\n    static void finalize();\n\n    static int relative_rank(Interval &P, int rank);\n    static int offset(Interval &P, int div, int rank);\n    static int group(Interval &P, int div, int rank);\n    static std::pair<int, int> group_and_offset(Interval &P, int div, int rank);\n\n    /*\n       We split P processors into div groups of P/div processors.\n     * gp from [0..(div-1)] is the id of the group of the current rank\n     * offset from [0..(newP.length()-1)] is the offset of current rank inside\n     its group\n\n     We then define the communication ring of the current processor as:\n     i * (P/div) + offset, where i = 0..(div-1) and offset = rank() - i *\n     (P/div)\n     */\n    static int rank_inside_ring(Interval &P, int div, int global_rank);\n    static int rank_outside_ring(Interval &P, int div, int off, int gp);\n\n    // returns the current strategy\n    const Strategy get_strategy();\n\n  protected:\n    // hierarchy of communicators used throughout the algorithm\n    std::vector<MPI_Comm> comm_ring_;\n    std::vector<MPI_Comm> comm_subproblem_;\n    // equivalents of mpi communicators, but for nccl\n#ifdef COSMA_WITH_NCCL\n    std::vector<ncclComm_t> nccl_comm_ring_;\n    std::vector<ncclComm_t> 
nccl_comm_subproblem_;\n#endif\n    int rank_;\n    const Strategy strategy_;\n    std::vector<int> step_to_comm_index_;\n    MPI_Comm full_comm_ = MPI_COMM_NULL;\n    int comm_size_ = 0;\n    // if true then not all processors were used\n    // this usually happens if given number of processors\n    // cannot be decomposed nicely (e.g. if P is prime)\n    bool using_reduced_comm_;\n    bool is_idle_;\n\n    void get_topology_edges(std::vector<int> &dest, std::vector<int> &weight);\n\n    void create_communicators(MPI_Comm comm);\n    // same as create just uses MPI_Comm_split instead of MPI_Comm_create\n    void split_communicators(MPI_Comm comm);\n\n    MPI_Comm create_comm_ring(MPI_Comm comm, Interval &P, int offset, int div);\n\n    MPI_Comm create_comm_subproblem(MPI_Comm comm, Interval &P, Interval &newP);\n\n    void free_comms();\n};\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/context.cpp",
    "content": "#include <complex>\n#include <stdlib.h>\n\n#include \"context.hpp\"\n#include <cosma/communicator.hpp>\n#include <cosma/environment_variables.hpp>\n#include <cosma/profiler.hpp>\n\nnamespace cosma {\n#ifdef COSMA_HAVE_GPU\ntemplate <typename Scalar>\ngpu::mm_handle<Scalar> *cosma_context<Scalar>::get_gpu_context() {\n    return gpu_ctx_.get();\n}\n#endif\ntemplate <typename Scalar>\ncosma_context<Scalar>::cosma_context() {\n    cpu_memory_limit = get_cpu_max_memory<Scalar>();\n    memory_pool_.amortization = get_memory_pool_amortization();\n    adapt_to_scalapack_strategy = get_adapt_strategy();\n    overlap_comm_and_comp = get_overlap_comm_and_comp();\n    pin_host_buffers = get_memory_pinning();\n#ifdef COSMA_HAVE_GPU\n    gpu_ctx_ = gpu::make_context<Scalar>(\n        gpu_streams(), gpu_max_tile_m(), gpu_max_tile_n(), gpu_max_tile_k());\n#endif\n}\n\ntemplate <typename Scalar>\ncosma_context<Scalar>::cosma_context(size_t cpu_mem_limit,\n                                     int streams,\n                                     int tile_m,\n                                     int tile_n,\n                                     int tile_k) {\n    cpu_memory_limit = (long long)cpu_mem_limit;\n    adapt_to_scalapack_strategy = get_adapt_strategy();\n    overlap_comm_and_comp = get_overlap_comm_and_comp();\n    pin_host_buffers = get_memory_pinning();\n    memory_pool_.amortization = get_memory_pool_amortization();\n    // do not reserve nor resize the memory pool\n    // let this just serve as the upper bound when creating a strategy\n    // because otherwise, it might reserve/resize to much more than the problem\n    // requires memory_pool_.resize(cpu_mem_limit);\n#ifdef COSMA_HAVE_GPU\n    gpu_ctx_ = gpu::make_context<Scalar>(streams, tile_m, tile_n, tile_k);\n    use_unified_memory_ = cosma::get_unified_memory();\n#else\n    std::cout << \"Ignoring parameters in make_context. 
These parameters only \"\n                 \"used in the CPU version.\"\n              << std::endl;\n#endif\n}\n\ntemplate <typename Scalar>\ncosma_context<Scalar>::~cosma_context() {\n    memory_pool_.unpin_all();\n#ifdef DEBUG\n    if (output) {\n        std::cout << \"context destroyed\" << std::endl;\n    }\n#endif\n}\n\ntemplate <typename Scalar>\nmemory_pool<Scalar> &cosma_context<Scalar>::get_memory_pool() {\n    return memory_pool_;\n}\n\ntemplate <typename Scalar>\nlong long cosma_context<Scalar>::get_cpu_memory_limit() {\n    return cpu_memory_limit;\n}\n\ntemplate <typename Scalar>\ncosma::communicator *cosma_context<Scalar>::get_cosma_comm() {\n    return prev_cosma_comm.get();\n}\n\ntemplate <typename Scalar>\nvoid cosma_context<Scalar>::register_state(MPI_Comm comm,\n                                           const Strategy strategy) {\n    if (comm == MPI_COMM_NULL)\n        return;\n\n    int same_comm = 0;\n\n    if (!prev_cosma_comm || prev_cosma_comm->full_comm() == MPI_COMM_NULL) {\n        prev_strategy = strategy;\n\n        PE(preprocessing_communicators);\n        prev_cosma_comm = std::make_unique<cosma::communicator>(strategy, comm);\n        PL();\n    } else {\n        MPI_Comm prev_comm = prev_cosma_comm->full_comm();\n        int comm_compare;\n        MPI_Comm_compare(prev_comm, comm, &comm_compare);\n        same_comm = comm_compare == MPI_CONGRUENT || comm_compare == MPI_IDENT;\n\n        bool same_strategy = strategy == prev_strategy;\n\n        // if same_comm and same strategy -> reuse the communicators\n        if (!same_comm || !same_strategy) {\n            prev_strategy = strategy;\n\n            PE(preprocessing_communicators);\n            prev_cosma_comm =\n                std::make_unique<cosma::communicator>(strategy, comm);\n            PL();\n\n            memory_pool_.unpin_all();\n            memory_pool_.already_pinned = false;\n            memory_pool_.resized = false;\n        }\n    }\n\n    // if this rank is 
not taking part in multiply, return\n    // if (prev_cosma_comm->is_idle()) return;\n\n#ifdef COSMA_HAVE_GPU\n    if (!prev_cosma_comm->is_idle() && !memory_pool_.resized && same_comm &&\n        strategy == prev_strategy) {\n        memory_pool_.already_pinned = true;\n    }\n#endif\n}\n\ntemplate <typename Scalar>\nvoid cosma_context<Scalar>::turn_on_output() {\n    output = true;\n    memory_pool_.turn_on_output();\n}\n\ntemplate <typename Scalar>\ncontext<Scalar> make_context() {\n    return std::make_unique<cosma_context<Scalar>>();\n}\n\ntemplate <typename Scalar>\ncontext<Scalar> make_context(size_t cpu_mem_limit,\n                             int streams,\n                             int tile_m,\n                             int tile_n,\n                             int tile_k) {\n    return std::make_unique<cosma_context<Scalar>>(\n        cpu_mem_limit, streams, tile_m, tile_n, tile_k);\n}\n\n// Meyer's singleton, thread-safe in C++11, but not in C++03.\n// The thread-safety is guaranteed by the standard in C++11:\n//     If control enters the declaration concurrently\n//     while the variable is being initialized,\n//     the concurrent execution shall wait\n//     for completion of the initialization\ntemplate <typename Scalar>\nglobal_context<Scalar> get_context_instance() {\n    static context<Scalar> ctxt = make_context<Scalar>();\n    return ctxt.get();\n}\n\nusing zfloat = std::complex<float>;\nusing zdouble = std::complex<double>;\n\n// template instantiation for cosma_context\ntemplate class cosma_context<float>;\ntemplate class cosma_context<double>;\ntemplate class cosma_context<zfloat>;\ntemplate class cosma_context<zdouble>;\n\n// template instantiation for make_context\ntemplate context<float> make_context();\ntemplate context<double> make_context();\ntemplate context<zfloat> make_context();\ntemplate context<zdouble> make_context();\n\ntemplate context<float> make_context(size_t cpu_mem_limit,\n                                     int 
streams,\n                                     int tile_m,\n                                     int tile_n,\n                                     int tile_k);\ntemplate context<double> make_context(size_t cpu_mem_limit,\n                                      int streams,\n                                      int tile_m,\n                                      int tile_n,\n                                      int tile_k);\ntemplate context<zfloat> make_context(size_t cpu_mem_limit,\n                                      int streams,\n                                      int tile_m,\n                                      int tile_n,\n                                      int tile_k);\ntemplate context<zdouble> make_context(size_t cpu_mem_limit,\n                                       int streams,\n                                       int tile_m,\n                                       int tile_n,\n                                       int tile_k);\n\n// template instantiation for get_context_instance\ntemplate global_context<float> get_context_instance();\ntemplate global_context<double> get_context_instance();\ntemplate global_context<zfloat> get_context_instance();\ntemplate global_context<zdouble> get_context_instance();\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/context.hpp",
    "content": "#pragma once\n#include <cosma/memory_pool.hpp>\n#include <cosma/strategy.hpp>\n#include <iostream>\n#include <memory>\n\n#include <mpi.h>\n\n#ifdef COSMA_HAVE_GPU\n#include <Tiled-MM/device_stream.hpp>\n#include <Tiled-MM/tiled_mm.hpp>\n#endif\n\nnamespace cosma {\n\n// forward-declaration\nclass communicator;\n\ntemplate <typename Scalar>\nclass cosma_context {\n  public:\n    cosma_context();\n    cosma_context(size_t cpu_mem_limit,\n                  int streams,\n                  int tile_m,\n                  int tile_n,\n                  int tile_k);\n    ~cosma_context();\n\n    void register_state(MPI_Comm comm, const Strategy strategy);\n\n    memory_pool<Scalar> &get_memory_pool();\n#ifdef COSMA_HAVE_GPU\n    gpu::mm_handle<Scalar> *get_gpu_context();\n#endif\n\n    cosma::communicator *get_cosma_comm();\n\n    long long get_cpu_memory_limit();\n\n    void turn_on_output();\n\n    bool unified_memory();\n\n    bool adapt_to_scalapack_strategy = true;\n\n    bool overlap_comm_and_comp = false;\n\n    bool pin_host_buffers = true;\n\n#if defined(COSMA_WITH_GPU_AWARE_MPI) || defined(COSMA_WITH_NCCL)\n    gpu::device_stream gpu_stream;\n#endif\n\n  private:\n    long long cpu_memory_limit = std::numeric_limits<long long>::max();\n    memory_pool<Scalar> memory_pool_;\n#ifdef COSMA_HAVE_GPU\n    std::unique_ptr<gpu::mm_handle<Scalar>> gpu_ctx_;\n    // gpu::mm_handle<Scalar> gpu_ctx_;\n#endif\n    bool output = false;\n    bool use_unified_memory_ = false;\n    Strategy prev_strategy;\n    std::unique_ptr<cosma::communicator> prev_cosma_comm;\n};\n\ntemplate <typename Scalar>\nusing global_context = cosma_context<Scalar> *;\n\ntemplate <typename Scalar>\nusing context = std::unique_ptr<cosma_context<Scalar>>;\n\ntemplate <typename Scalar>\ncontext<Scalar> make_context();\n\ntemplate <typename Scalar>\ncontext<Scalar> make_context(size_t cpu_mem_limit,\n                             int streams,\n                             int tile_m,\n       
                      int tile_n,\n                             int tile_k);\n\n// Meyer's singleton, thread-safe in C++11, but not in C++03.\n// The thread-safety is guaranteed by the standard in C++11:\n//     If control enters the declaration concurrently\n//     while the variable is being initialized,\n//     the concurrent execution shall wait\n//     for completion of the initialization\ntemplate <typename Scalar>\nglobal_context<Scalar> get_context_instance();\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/cosma_pxgemm.cpp",
    "content": "#include <cassert>\n#include <mpi.h>\n\n#include <cosma/blacs.hpp>\n#include <cosma/multiply.hpp>\n#include <cosma/cosma_pxgemm.hpp>\n#include <cosma/profiler.hpp>\n#include <cosma/pxgemm_params.hpp>\n#include <cosma/environment_variables.hpp>\n\n#include <costa/grid2grid/ranks_reordering.hpp>\n#include <costa/grid2grid/transformer.hpp>\n\nnamespace cosma {\ntemplate <typename T>\nvoid pxgemm(const char transa,\n           const char transb,\n           const int m,\n           const int n,\n           const int k,\n           const T alpha,\n           const T *a,\n           const int ia,\n           const int ja,\n           const int *desca,\n           const T *b,\n           const int ib,\n           const int jb,\n           const int *descb,\n           const T beta,\n           T *c,\n           const int ic,\n           const int jc,\n           const int *descc) {\n    // **********************************\n    //           CORNER CASES\n    // **********************************\n    // edge cases, which are allowed by the standard\n    if (m == 0 || n == 0) return;\n    // afterwards we are sure m != 0 and n != 0\n    if (k == 0 || alpha == T{0}) {\n        // scale matrix C by beta\n        // starting from (ic-1, jc-1)\n        scale_matrix(descc, c, ic, jc, m, n, beta);\n        return;\n    }\n    // afterwards we are sure k != 0 and alpha != 0\n    // case beta == 0 is already handled by the code below\n\n    // **********************************\n    //           MAIN CODE\n    // **********************************\n    // clear the profiler\n    PC();\n    // start profiling\n    PE(init);\n    char trans_a = std::toupper(transa);\n    char trans_b = std::toupper(transb);\n\n    // blas context\n    int ctxt = scalapack::get_grid_context(desca, descb, descc);\n\n    // scalapack rank grid decomposition\n    int procrows, proccols;\n    int myrow, mycol;\n    blacs::Cblacs_gridinfo(ctxt, &procrows, &proccols, &myrow, &mycol);\n\n   
 // get MPI communicator\n    MPI_Comm comm = scalapack::get_communicator(ctxt);\n\n    // communicator size and rank\n    int rank, P;\n    MPI_Comm_size(comm, &P);\n    MPI_Comm_rank(comm, &rank);\n\n    // block sizes\n    scalapack::block_size b_dim_a(desca);\n    scalapack::block_size b_dim_b(descb);\n    scalapack::block_size b_dim_c(descc);\n\n    // global matrix sizes\n    scalapack::global_matrix_size mat_dim_a(desca);\n    scalapack::global_matrix_size mat_dim_b(descb);\n    scalapack::global_matrix_size mat_dim_c(descc);\n\n    // sumatrix size to multiply\n    int a_subm = trans_a == 'N' ? m : k;\n    int a_subn = trans_a == 'N' ? k : m;\n\n    int b_subm = trans_b == 'N' ? k : n;\n    int b_subn = trans_b == 'N' ? n : k;\n\n    int c_subm = m;\n    int c_subn = n;\n\n    // rank sources (rank coordinates that own first row and column of a matrix)\n    scalapack::rank_src rank_src_a(desca);\n    scalapack::rank_src rank_src_b(descb);\n    scalapack::rank_src rank_src_c(descc);\n\n    // leading dimensions\n    int lld_a = scalapack::leading_dimension(desca);\n    int lld_b = scalapack::leading_dimension(descb);\n    int lld_c = scalapack::leading_dimension(descc);\n\n    // check whether rank grid is row-major or col-major\n    auto ordering = scalapack::rank_ordering(ctxt, P);\n    char grid_order = \n        ordering == costa::scalapack::ordering::column_major ? 
'C' : 'R';\n\n#ifdef DEBUG\n    if (rank == 0) {\n        pxgemm_params<T> params(\n                             // global dimensions\n                             mat_dim_a.rows, mat_dim_a.cols,\n                             mat_dim_b.rows, mat_dim_b.cols,\n                             mat_dim_c.rows, mat_dim_c.cols,\n                             // block dimensions\n                             b_dim_a.rows, b_dim_a.cols,\n                             b_dim_b.rows, b_dim_b.cols,\n                             b_dim_c.rows, b_dim_c.cols,\n                             // submatrix start\n                             ia, ja,\n                             ib, jb,\n                             ic, jc,\n                             // problem size\n                             m, n, k,\n                             // transpose flags\n                             trans_a, trans_b,\n                             // alpha, beta\n                             alpha, beta,\n                             // leading dimensinons\n                             lld_a, lld_b, lld_c,\n                             // processor grid\n                             procrows, proccols,\n                             // processor grid ordering\n                             grid_order,\n                             // ranks containing first rows\n                             rank_src_a.row_src, rank_src_a.col_src,\n                             rank_src_b.row_src, rank_src_b.col_src,\n                             rank_src_c.row_src, rank_src_c.col_src\n                         );\n        std::cout << params << std::endl;\n    }\n    MPI_Barrier(comm);\n#endif\n\n    std::vector<int> divisors;\n    std::string step_type = \"\";\n    std::string dimensions = \"\";\n    PL();\n\n    PE(strategy);\n    /*\n      If the matrix is very large, then its reshuffling is expensive.\n      For this reason, try to adapt the strategy to the scalapack layout\n      to minimize the need for reshuffling, even 
if it makes a \n      suoptimal communication scheme in COSMA.\n      This method will add \"prefix\" to the strategy, i.e. some initial steps\n      that COSMA should start with and then continue with finding \n      the communication-optimal strategy.\n     */\n    bool strategy_adapted = false;\n    if (P > 1 && get_context_instance<T>()->adapt_to_scalapack_strategy) {\n        adapt_strategy_to_block_cyclic_grid(divisors, dimensions, step_type,\n                                            m, n, k, P,\n                                            mat_dim_a, mat_dim_b, mat_dim_c,\n                                            b_dim_a, b_dim_b, b_dim_c,\n                                            ia, ja, ib, jb, ic, jc,\n                                            trans_a, trans_b,\n                                            procrows, proccols,\n                                            grid_order\n                                            );\n        if (step_type != \"\") {\n            strategy_adapted = true;\n        }\n    }\n\n    // get CPU memory limit\n    auto cpu_memory_limit = get_context_instance<T>()->get_cpu_memory_limit();\n    Strategy strategy(m, n, k, P,\n                      divisors, dimensions, step_type,\n                      cpu_memory_limit);\n    // enable overlapping communication and computation if turned on\n    if (get_context_instance<T>()->overlap_comm_and_comp) {\n        strategy.enable_overlapping_comm_and_comp();\n    }\n    PL();\n\n    PE(init);\n\n#ifdef DEBUG\n    if (rank == 0) {\n        std::cout << strategy << std::endl;\n        std::cout << \"============================================\" << std::endl;\n    }\n    MPI_Barrier(comm);\n#endif\n\n    PL();\n    // create COSMA mappers\n    Mapper mapper_a('A', strategy, rank);\n    Mapper mapper_b('B', strategy, rank);\n    Mapper mapper_c('C', strategy, rank);\n\n    auto cosma_grid_a = mapper_a.get_layout_grid();\n    auto cosma_grid_b = 
mapper_b.get_layout_grid();\n    auto cosma_grid_c = mapper_c.get_layout_grid();\n\n    PE(transform_init);\n    // get abstract layout descriptions for ScaLAPACK layout\n    auto scalapack_layout_a = costa::get_scalapack_layout<T>(\n        lld_a,\n        {mat_dim_a.rows, mat_dim_a.cols},\n        {ia, ja},\n        {a_subm, a_subn},\n        {b_dim_a.rows, b_dim_a.cols},\n        {procrows, proccols},\n        ordering,\n        {rank_src_a.row_src, rank_src_a.col_src},\n        a,\n        'C',\n        rank);\n\n    auto scalapack_layout_b = costa::get_scalapack_layout<T>(\n        lld_b,\n        {mat_dim_b.rows, mat_dim_b.cols},\n        {ib, jb},\n        {b_subm, b_subn},\n        {b_dim_b.rows, b_dim_b.cols},\n        {procrows, proccols},\n        ordering,\n        {rank_src_b.row_src, rank_src_b.col_src},\n        b,\n        'C',\n        rank);\n\n    auto scalapack_layout_c = costa::get_scalapack_layout<T>(\n        lld_c,\n        {mat_dim_c.rows, mat_dim_c.cols},\n        {ic, jc},\n        {c_subm, c_subn},\n        {b_dim_c.rows, b_dim_c.cols},\n        {procrows, proccols},\n        ordering,\n        {rank_src_c.row_src, rank_src_c.col_src},\n        c,\n        'C',\n        rank);\n    PL();\n\n    // by default, no process-relabeling is assumed.\n    bool reordered = false;\n    std::vector<int> rank_permutation;\n    MPI_Comm reordered_comm = comm;\n\n    if (!strategy_adapted) {\n        PE(transform_reordering_matching);\n        // total communication volume for transformation of layouts\n        // costa::comm_volume comm_vol;\n        auto comm_vol = costa::communication_volume(scalapack_layout_a.grid, cosma_grid_a, trans_a);\n        comm_vol += costa::communication_volume(scalapack_layout_b.grid, cosma_grid_b, trans_b);\n        comm_vol += costa::communication_volume(cosma_grid_c, scalapack_layout_c.grid, 'N');\n\n        // compute the optimal rank reordering that minimizes the communication volume\n        rank_permutation = 
costa::optimal_reordering(comm_vol, P, reordered);\n        PL();\n\n        // create reordered communicator, which has same ranks\n        // but relabelled as given by the rank_permutation\n        // (to avoid the communication during layout transformation)\n        PE(transform_reordering_comm);\n        if (reordered) {\n            MPI_Comm_split(comm, 0, rank_permutation[rank], &reordered_comm);\n        }\n        PL();\n    } else {\n        rank_permutation.reserve(P);\n        // if the strategy is adapted, then no process-relabeling occurs.\n        for (int i = 0; i < P; ++i) {\n            rank_permutation.push_back(i);\n        }\n    }\n\n#ifdef DEBUG\n    if (rank == 0) {\n        std::cout << \"Optimal rank relabeling:\" << std::endl;\n        for (int i = 0; i < P; ++i) {\n            std::cout << i << \"->\" << rank_permutation[i] << std::endl;\n        }\n    }\n#endif\n\n    // first, we don't want to alloate the space, just to precompute\n    // the required memory size, so we activate dry_run, which precomputes\n    // everything but doesn't allocate anything yet\n    bool dont_allocate = true;\n    CosmaMatrix<T> A(std::move(mapper_a), rank_permutation[rank], dont_allocate);\n    CosmaMatrix<T> B(std::move(mapper_b), rank_permutation[rank], dont_allocate);\n    CosmaMatrix<T> C(std::move(mapper_c), rank_permutation[rank], dont_allocate);\n\n    // avoid resizing the buffer by reserving immediately the total required memory\n    // collect sizes of all buffers that are going to be allocated for each matrix\n    auto A_buffers = A.required_memory();\n    auto B_buffers = B.required_memory();\n    auto C_buffers = C.required_memory();\n\n    std::vector<std::size_t> buffer_sizes;\n    int n_buffers = A_buffers.size() + B_buffers.size() + C_buffers.size();\n    if (n_buffers > 0) {\n        buffer_sizes.reserve(n_buffers);\n        std::copy(A_buffers.begin(), A_buffers.end(), std::back_inserter(buffer_sizes));\n        
std::copy(B_buffers.begin(), B_buffers.end(), std::back_inserter(buffer_sizes));\n        std::copy(C_buffers.begin(), C_buffers.end(), std::back_inserter(buffer_sizes));\n\n        // allocate all buffers in the memory pool\n        get_context_instance<T>()->get_memory_pool().reserve(buffer_sizes);\n    }\n\n    // turn off dryrun mode, allocate memory for all matrices\n    A.allocate();\n    B.allocate();\n    C.allocate();\n\n    // get abstract layout descriptions for COSMA layout\n    auto cosma_layout_a = A.get_grid_layout();\n    auto cosma_layout_b = B.get_grid_layout();\n    auto cosma_layout_c = C.get_grid_layout();\n\n    cosma_layout_a.reorder_ranks(rank_permutation);\n    cosma_layout_b.reorder_ranks(rank_permutation);\n    cosma_layout_c.reorder_ranks(rank_permutation);\n\n#ifdef DEBUG\n    std::cout << \"Transforming the input matrices A and B from Scalapack -> COSMA\" << std::endl;\n#endif\n    // transform A and B from scalapack to cosma layout\n    costa::transformer<T> transf(comm);\n    transf.schedule(scalapack_layout_a, cosma_layout_a, trans_a, T{1}, T{0});\n    // transf.transform();\n    transf.schedule(scalapack_layout_b, cosma_layout_b, trans_b, T{1}, T{0});\n\n    transf.transform();\n\n#ifdef DEBUG\n    std::cout << \"COSMA multiply\" << std::endl;\n#endif\n\n    // perform cosma multiplication\n    multiply<T>(A, B, C, strategy, reordered_comm, T{1}, T{0});\n    // construct cosma layout again, to avoid outdated\n    // pointers when the memory pool has been used\n    // in case it resized during multiply\n    cosma_layout_c = C.get_grid_layout();\n    cosma_layout_c.reorder_ranks(rank_permutation);\n\n#ifdef DEBUG\n    std::cout << \"Transforming the result C back from COSMA to ScaLAPACK\" << std::endl;\n#endif\n    // costa::transform the result from cosma back to scalapack\n    // costa::transform<T>(cosma_layout_c, scalapack_layout_c, comm);\n    transf.schedule(cosma_layout_c, scalapack_layout_c, 'N', alpha, beta);\n\n    
transf.transform();\n\n#ifdef DEBUG\n    if (rank == 0) {\n        auto reordered_vol = costa::communication_volume(scalapack_layout_a.grid, cosma_layout_a.grid, trans_a);\n        reordered_vol += costa::communication_volume(scalapack_layout_b.grid, cosma_layout_b.grid, trans_b);\n        if (std::abs(beta) > 0) {\n            reordered_vol += costa::communication_volume(scalapack_layout_c.grid, cosma_layout_c.grid, 'N');\n        }\n        reordered_vol += costa::communication_volume(cosma_layout_c.grid, scalapack_layout_c.grid, 'N');\n\n        // std::cout << \"Detailed comm volume: \" << comm_vol << std::endl;\n        // std::cout << \"Detailed comm volume reordered: \" << reordered_vol << std::endl;\n\n        auto reordered_vol_total = reordered_vol.total_volume();\n        std::cout << \"Comm volume [num_of_elements]= \" << reordered_vol_total << std::endl;\n    }\n#endif\n\n    PE(transform_reordering_comm);\n    if (reordered) {\n        MPI_Comm_free(&reordered_comm);\n    }\n    PL();\n}\n\n// scales the submatrix of C by beta\n// The submatrix is defined by (ic-1, jc-1) and (ic-1+m, jc-1+n)\ntemplate <typename T>\nvoid scale_matrix(const int* descc, T* c,\n                  const int ic, const int jc,\n                  const int m, const int n,\n                  const T beta) {\n    if (beta == T{1}) return;\n    // clear the profiler\n    PC();\n\n    // start profiling\n    PE(init);\n\n    // blas context\n    int ctxt = scalapack::get_grid_context(descc);\n\n    // scalapack rank grid decomposition\n    int procrows, proccols;\n    int myrow, mycol;\n    blacs::Cblacs_gridinfo(ctxt, &procrows, &proccols, &myrow, &mycol);\n\n    // get MPI communicator\n    MPI_Comm comm = scalapack::get_communicator(ctxt);\n\n    // communicator size and rank\n    int rank, P;\n    MPI_Comm_size(comm, &P);\n    MPI_Comm_rank(comm, &rank);\n\n    // block sizes\n    scalapack::block_size b_dim_c(descc);\n\n    // global matrix sizes\n    
scalapack::global_matrix_size mat_dim_c(descc);\n\n    // sumatrix size to multiply\n    int c_subm = m;\n    int c_subn = n;\n\n    // rank sources (rank coordinates that own first row and column of a matrix)\n    scalapack::rank_src rank_src_c(descc);\n\n    // leading dimensions\n    int lld_c = scalapack::leading_dimension(descc);\n\n    // check whether rank grid is row-major or col-major\n    auto ordering = scalapack::rank_ordering(ctxt, P);\n    char grid_order = \n        ordering == costa::scalapack::ordering::column_major ? 'C' : 'R';\n\n    // create costa object describing the given scalapack layout\n    auto layout = costa::get_scalapack_layout<T>(\n        lld_c,\n        {mat_dim_c.rows, mat_dim_c.cols},\n        {ic, jc},\n        {c_subm, c_subn},\n        {b_dim_c.rows, b_dim_c.cols},\n        {procrows, proccols},\n        ordering,\n        {rank_src_c.row_src, rank_src_c.col_src},\n        c,\n        'C',\n        rank);\n    PL();\n\n    PE(multiply_computation);\n    // scale the elements in the submatrix given by the layout\n    layout.scale_by(beta);\n    PL();\n}\n\n// returns A, B or C, depending on which flag was set to true.\n// used to minimize the number of if-else statements in adapt_strategy\ntemplate <typename T>\nT& one_of(T &A,\n         T &B,\n         T &C,\n         bool first,\n         bool second,\n         bool third) {\n    if (first) return A;\n    if (second) return B;\n    return C;\n}\n\n// returns the largest of (first, second, third) and sets the corresponding\n// boolean flag of the largest element to true.\n// used to minimize the number of if-else statements in adapt_strategy\ntemplate <typename T>\nT which_is_largest(T&& first, T&& second, T&& third,\n                      bool& first_largest, bool& second_largest, bool& third_largest) {\n    T largest = std::max(std::max(first, second), third);\n    first_largest = false;\n    second_largest = false;\n    third_largest = false;\n    if (largest == first) {\n  
      first_largest = true;\n        return std::forward<T>(first);\n    }\n    if (largest == second) {\n        second_largest = true;\n        return std::forward<T>(second);\n    }\n    if (largest == third) {\n        third_largest = true;\n        return std::forward<T>(third);\n    }\n    return T{};\n}\n\nchar get_matrix_dimension(bool matrix_A, bool matrix_B, bool matrix_C,\n                                 char trans_a, char trans_b,\n                                 int index) {\n    std::string dimensions = \"\";\n    if (matrix_A) {\n        // if transposed\n        dimensions = trans_a != 'N' ? \"km\" : \"mk\";\n    } else if (matrix_B) {\n        dimensions = trans_b != 'N' ? \"kn\" : \"nk\";\n    } else {\n        dimensions = \"mn\";\n    }\n\n    return dimensions[index];\n}\n\nvoid adapt_strategy_to_block_cyclic_grid(// these will contain the suggested strategy prefix\n                                         std::vector<int>& divisors, \n                                         std::string& dimensions,\n                                         std::string& step_type,\n                                         // multiplication problem size\n                                         int m, int n, int k, int P,\n                                         // global matrix dimensions\n                                         scalapack::global_matrix_size& mat_dim_a,\n                                         scalapack::global_matrix_size& mat_dim_b,\n                                         scalapack::global_matrix_size& mat_dim_c,\n                                         // block sizes\n                                         scalapack::block_size& b_dim_a,\n                                         scalapack::block_size& b_dim_b,\n                                         scalapack::block_size& b_dim_c,\n                                         // (i, j) denoting the submatrix coordinates\n                                         int ia, int ja,\n     
                                    int ib, int jb,\n                                         int ic, int jc,\n                                         // transpose flags\n                                         char trans_a, char trans_b,\n                                         // processor grid\n                                         int procrows, int proccols,\n                                         char order\n                                         ) {\n    // If the matrix is very large, then its reshuffling is expensive.\n    // For this reason, try to adapt the strategy to the scalapack layout\n    // to minimize the need for reshuffling, even if it makes a \n    // suoptimal communication scheme in COSMA.\n    // Here, we only do this optimization if the scalapack grid\n    // fills up the matrix completely (everything is perfectly divisible).\n    // Since there are 3 matrices, we only focus on the largest one.\n    bool first = false;\n    bool second = false;\n    bool third = false;\n\n    // sumatrix size to multiply\n    int a_subm = trans_a == 'N' ? m : k;\n    int a_subn = trans_a == 'N' ? k : m;\n\n    int b_subm = trans_b == 'N' ? k : n;\n    int b_subn = trans_b == 'N' ? 
n : k;\n\n    int c_subm = m;\n    int c_subn = n;\n\n    long long largest_matrix_local_size = \n        which_is_largest(1LL * m * k, \n                         1LL * k * n, \n                         1LL * m * n, \n                         first,\n                         second,\n                         third) / P;\n\n    // We only apply this optimization if the matrix is large enough,\n    // because adapting the strategy to the given initial grid\n    // might result in a communication-suboptimal strategy.\n    // However, when the reshuffling cost is too high, then it might be beneficial\n    // to make COSMA use a communication-suboptimal strategy\n    // to reduce the overall time.\n    if (largest_matrix_local_size > 1e7) {\n        auto b_dim = one_of(b_dim_a, b_dim_b, b_dim_c, first, second, third);\n        auto mat_dim = one_of(mat_dim_a, mat_dim_b, mat_dim_c, first, second, third);\n        auto subm = one_of(a_subm, b_subm, c_subm, first, second, third);\n        auto subn = one_of(a_subn, b_subn, c_subn, first, second, third);\n        auto i = one_of(ia, ib, ic, first, second, third);\n        auto j = one_of(ja, jb, jc, first, second, third);\n\n        // The whole matrix should take part in the multiplication,\n        // the blocks sizes should perfectly divide the matrix\n        // and processor grid must perfectly cover the matrix blocks grid.\n        if ((i == 1 && j == 1)  // no submatrix\n            && (subm == mat_dim.rows && subn == mat_dim.cols) // no submatrix\n            && (mat_dim.rows % b_dim.rows == 0) // blocks perfectly divide the matrix\n            && (mat_dim.cols % b_dim.cols == 0)  // blocks perfectly divide the matrix\n            && (mat_dim.rows / b_dim.rows % procrows == 0)\n            && (mat_dim.cols / b_dim.cols % proccols == 0)) // processor grid divides the matrix blocks grid\n        {\n            int divisor_rows = mat_dim.rows / b_dim.rows / procrows;\n            int divisor_cols = mat_dim.cols / 
b_dim.cols / proccols;\n\n            // adding sequential steps\n            if (divisor_rows > 1) {\n                step_type += \"s\";\n                divisors.push_back(divisor_rows);\n                dimensions += get_matrix_dimension(first, second, third,\n                                                   trans_a, trans_b,\n                                                   0); // 0 means rows\n            }\n\n            if (divisor_cols > 1) {\n                step_type += \"s\";\n                divisors.push_back(divisor_cols);\n                dimensions += get_matrix_dimension(first, second, third,\n                                                   trans_a, trans_b,\n                                                   1); // 1 means columns\n            }\n\n            // adding parallel steps\n            if (order == 'R') {\n                // first add rows split and then cols split if applicable\n                if (procrows > 1) {\n                    step_type += \"p\";\n                    divisors.push_back(procrows);\n                    dimensions += get_matrix_dimension(first, second, third,\n                                                       trans_a, trans_b,\n                                                       0); // 0 means rows\n                }\n                if (proccols > 1) {\n                    step_type += \"p\";\n                    divisors.push_back(proccols);\n                    dimensions += get_matrix_dimension(first, second, third,\n                                                       trans_a, trans_b,\n                                                       1); // 1 means columns\n                }\n            } else {\n                // first add cols split and then rows split if applicable\n                if (proccols > 1) {\n                    step_type += \"p\";\n                    divisors.push_back(proccols);\n                    dimensions += get_matrix_dimension(first, second, third,\n       
                                                trans_a, trans_b,\n                                                       1); // 1 means columns\n                }\n                if (procrows > 1) {\n                    step_type += \"p\";\n                    divisors.push_back(procrows);\n                    dimensions += get_matrix_dimension(first, second, third,\n                                                       trans_a, trans_b,\n                                                       0); // 0 means rows\n                }\n            }\n        }\n    }\n}\n\nbool is_problem_too_small(int m, int n, int k) {\n    static const int cosma_dim_threshold = cosma::get_cosma_dim_threshold();\n    return std::min(m, std::min(n, k)) < cosma_dim_threshold;\n}\n\n// explicit instantiation for pxgemm\ntemplate void pxgemm<double>(const char trans_a,\n                            const char trans_b,\n                            const int m,\n                            const int n,\n                            const int k,\n                            const double alpha,\n                            const double *a,\n                            const int ia,\n                            const int ja,\n                            const int *desca,\n                            const double *b,\n                            const int ib,\n                            const int jb,\n                            const int *descb,\n                            const double beta,\n                            double *c,\n                            const int ic,\n                            const int jc,\n                            const int *descc);\n\ntemplate void pxgemm<float>(const char trans_a,\n                           const char trans_b,\n                           const int m,\n                           const int n,\n                           const int k,\n                           const float alpha,\n                           const float *a,\n                 
          const int ia,\n                           const int ja,\n                           const int *desca,\n                           const float *b,\n                           const int ib,\n                           const int jb,\n                           const int *descb,\n                           const float beta,\n                           float *c,\n                           const int ic,\n                           const int jc,\n                           const int *descc);\n\ntemplate void pxgemm<zdouble_t>(const char trans_a,\n                               const char trans_b,\n                               const int m,\n                               const int n,\n                               const int k,\n                               const zdouble_t alpha,\n                               const zdouble_t *a,\n                               const int ia,\n                               const int ja,\n                               const int *desca,\n                               const zdouble_t *b,\n                               const int ib,\n                               const int jb,\n                               const int *descb,\n                               const zdouble_t beta,\n                               zdouble_t *c,\n                               const int ic,\n                               const int jc,\n                               const int *descc);\n\ntemplate void pxgemm<zfloat_t>(const char trans_a,\n                              const char trans_b,\n                              const int m,\n                              const int n,\n                              const int k,\n                              const zfloat_t alpha,\n                              const zfloat_t *a,\n                              const int ia,\n                              const int ja,\n                              const int *desca,\n                              const zfloat_t *b,\n                              const int 
ib,\n                              const int jb,\n                              const int *descb,\n                              const zfloat_t beta,\n                              zfloat_t *c,\n                              const int ic,\n                              const int jc,\n                              const int *descc);\n} // namespace cosma\n\n"
  },
  {
    "path": "src/cosma/cosma_pxgemm.hpp",
    "content": "#pragma once\n#include <complex>\n#include <cosma/scalapack.hpp>\n/*\n * This is a COSMA backend for matrices given in ScaLAPACK format.\n * It is less efficient than using cosma::multiply directly with COSMA data\n * layout. Thus, here we pay the price of transforming matrices between\n * scalapack and COSMA layout.\n */\nnamespace cosma {\n\nusing zdouble_t = std::complex<double>;\nusing zfloat_t = std::complex<float>;\n\ntemplate <typename T>\nvoid pxgemm(const char trans_a,\n           const char trans_b,\n           const int m,\n           const int n,\n           const int k,\n           const T alpha,\n           const T *a,\n           const int ia,\n           const int ja,\n           const int *desca,\n           const T *b,\n           const int ib,\n           const int jb,\n           const int *descb,\n           const T beta,\n           T *c,\n           const int ic,\n           const int jc,\n           const int *descc);\n\n/*\n  If the matrix is very large, then its reshuffling is expensive.\n  For this reason, try to adapt the strategy to the scalapack layout\n  to minimize the need for reshuffling, even if it makes a \n  suoptimal communication scheme in COSMA.\n*/\nvoid adapt_strategy_to_block_cyclic_grid(// these will contain the suggested strategy prefix\n                                         std::vector<int>& divisors, \n                                         std::string& dimensions,\n                                         std::string& step_type,\n                                         // multiplication problem size\n                                         int m, int n, int k, int P,\n                                         // global matrix dimensions\n                                         scalapack::global_matrix_size& mat_dim_a,\n                                         scalapack::global_matrix_size& mat_dim_b,\n                                         scalapack::global_matrix_size& mat_dim_c,\n            
                             // block sizes\n                                         scalapack::block_size& b_dim_a,\n                                         scalapack::block_size& b_dim_b,\n                                         scalapack::block_size& b_dim_c,\n                                         // (i, j) denoting the submatrix coordinates\n                                         int ia, int ja,\n                                         int ib, int jb,\n                                         int ic, int jc,\n                                         // transpose flags\n                                         char transa, char transb,\n                                         // processor grid\n                                         int procrows, int proccols,\n                                         char order\n                                         );\n\n// scales the submatrix of C by beta\n// The submatrix is defined by (ic-1, jc-1) and (ic-1+m, jc-1+n)\ntemplate <typename T>\nvoid scale_matrix(const int* descc, T* c,\n                  const int ic, const int jc,\n                  const int m, const int n,\n                  const T beta);\n\n\n// checks if the problem is too small for COSMA\nbool is_problem_too_small(int m, int n, int k);\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/environment_variables.cpp",
    "content": "#include <algorithm>\n#include <cosma/environment_variables.hpp>\n\nbool cosma::env_var_defined(const char *var_name) {\n    char *var = getenv(var_name);\n    return var != nullptr;\n}\n\nbool cosma::get_bool_env_var(std::string name, bool default_value) {\n    char *var;\n    var = getenv(name.c_str());\n    bool value = default_value;\n    if (var != nullptr) {\n        std::string s(var);\n        std::transform(s.begin(), s.end(), s.begin(), [&](char c) {\n            return std::toupper(c);\n        });\n        value = (s == \"ON\");\n    }\n    return value;\n}\n\nint cosma::get_int_env_var(std::string name, int default_value) {\n    char *var;\n    var = getenv(name.c_str());\n    int value = default_value;\n    if (var != nullptr)\n        value = std::atoi(var);\n    return value;\n}\n\nfloat cosma::get_float_env_var(std::string name, float default_value) {\n    char *var;\n    var = getenv(name.c_str());\n    float value = default_value;\n    if (var != nullptr)\n        value = std::atof(var);\n    return value;\n}\n\ndouble cosma::get_double_env_var(std::string name, double default_value) {\n    char *var;\n    var = getenv(name.c_str());\n    double value = default_value;\n    if (var != nullptr)\n        value = std::atof(var);\n    return value;\n}\n\nstd::size_t cosma::get_ull_env_var(std::string name, size_t default_value) {\n    char *var;\n    var = getenv(name.c_str());\n    size_t value = default_value;\n    if (var != nullptr)\n        value = std::stoull(std::string(var));\n    return std::size_t(value);\n}\n\nint cosma::gpu_streams() {\n    return get_int_env_var(env_var_names::gpu_n_streams,\n                           env_var_defaults::gpu_n_streams);\n}\n\nint cosma::gpu_max_tile_m() {\n    return get_int_env_var(env_var_names::gpu_tile_m,\n                           env_var_defaults::gpu_tile_m);\n}\n\nint cosma::gpu_max_tile_n() {\n    return get_int_env_var(env_var_names::gpu_tile_n,\n                           
env_var_defaults::gpu_tile_n);\n}\n\nint cosma::gpu_max_tile_k() {\n    return get_int_env_var(env_var_names::gpu_tile_k,\n                           env_var_defaults::gpu_tile_k);\n}\n\nbool cosma::get_adapt_strategy() {\n    return get_bool_env_var(env_var_names::adapt_strategy,\n                            env_var_defaults::adapt_strategy);\n}\n\nbool cosma::get_overlap_comm_and_comp() {\n    return get_bool_env_var(env_var_names::overlap, env_var_defaults::overlap);\n}\n\nbool cosma::get_memory_pinning() {\n    return get_bool_env_var(env_var_names::memory_pinning_enabled,\n                            env_var_defaults::memory_pinning_enabled);\n}\n\nbool cosma::get_unified_memory() {\n    return get_bool_env_var(env_var_names::cosma_gpu_unified_memory,\n                            env_var_defaults::unified_memory);\n}\n\ndouble cosma::get_memory_pool_amortization() {\n    return get_double_env_var(env_var_names::memory_pool_amortization,\n                              env_var_defaults::memory_pool_amortization);\n}\n\nint cosma::get_min_local_dimension() {\n    return get_int_env_var(env_var_names::min_local_dimension,\n                           env_var_defaults::min_local_dimension);\n}\n\nint cosma::get_cosma_dim_threshold() {\n    return get_int_env_var(env_var_names::cosma_dim_threshold,\n                           env_var_defaults::cosma_dim_threshold);\n}\n\nint cosma::get_cosma_cpu_memory_alignment() {\n    return get_int_env_var(env_var_names::cosma_cpu_memory_alignment,\n                           env_var_defaults::cosma_cpu_memory_alignment);\n}\n\n// reads the memory limit in MB per rank\n// and converts the limit to #elements that each rank is allowed to use\ntemplate <typename T>\nlong long cosma::get_cpu_max_memory() {\n    char *var;\n    var = getenv(env_var_names::cpu_max_memory.c_str());\n    long long value = env_var_defaults::cpu_max_memory;\n    long long megabytes = env_var_defaults::cpu_max_memory;\n    if (var != nullptr) {\n        
megabytes = std::atoll(var);\n        // from megabytes to #elements\n        value = megabytes * 1024LL * 1024LL / sizeof(T);\n    }\n\n    return value;\n}\n\n// template instantiation of get_cpu_max_memory()\ntemplate long long cosma::get_cpu_max_memory<float>();\ntemplate long long cosma::get_cpu_max_memory<double>();\ntemplate long long cosma::get_cpu_max_memory<std::complex<float>>();\ntemplate long long cosma::get_cpu_max_memory<std::complex<double>>();\n"
  },
  {
    "path": "src/cosma/environment_variables.hpp",
    "content": "#pragma once\n#include <complex>\n#include <limits>\n#include <stdlib.h>\n#include <string>\n\nnamespace cosma {\n\n// names of supported environment variables\nnamespace env_var_names {\n// number of GPU streams to be used per rank\nconst std::string gpu_n_streams = \"COSMA_GPU_STREAMS\";\n// max sizes of GPU tiles (in #elements)\n// MxN corresponds to matrix C and K to the shared dimension\nconst std::string gpu_tile_m = \"COSMA_GPU_MAX_TILE_M\";\nconst std::string gpu_tile_n = \"COSMA_GPU_MAX_TILE_N\";\nconst std::string gpu_tile_k = \"COSMA_GPU_MAX_TILE_K\";\n// if ON, COSMA will try to natively use scalapack layout\n// without transformation. Only used in the pxgemm wrapper.\nconst std::string adapt_strategy = \"COSMA_ADAPT_STRATEGY\";\n// if ON, COSMA will try to overlap communication and computation\nconst std::string overlap = \"COSMA_OVERLAP_COMM_AND_COMP\";\n// specifies the maximum available CPU memory per rank in MB\nconst std::string cpu_max_memory = \"COSMA_CPU_MAX_MEMORY\";\n// if true, local host matrices will be pinned\n// (only used when GPU backend enabled)\n// which increases the efficiency\nconst std::string memory_pinning_enabled = \"COSMA_GPU_MEMORY_PINNING\";\n// The scaling factor used for the memory-pool allocation size.(cpu-only).\n// If amortization = 1.2, then the memory allocator\n// will request 1.2x the requested size (thus, 20% more than needed).\n// Higher values better amortize the cost of memory buffers resizing\n// which can occur when the algorithm is invoked for different matrix sizes.\n// However, higher amortization values also mean that\n// potentially more memory is allocated than used which can be\n// a problem when the memory resource is tight.\n// There is just a single memory pool in COSMA and all the required\n// memory is taken from this memory pool only.\nconst std::string memory_pool_amortization = \"COSMA_MEMORY_POOL_AMORTIZATION\";\n// minimum local matrix size -- if P is too large, so that 
after\n// splitting the local matrix size get lower than this,\n// then P will be reduced so that the problem size\n// never gets smaller than specified by this variable\nconst std::string min_local_dimension = \"COSMA_MIN_LOCAL_DIMENSION\";\n// if any dimension is smaller than this threshold, it will be dispatched to\n// SCALAPACK since it's too \"thin\" for COSMA in that case\nconst std::string cosma_dim_threshold = \"COSMA_DIM_THRESHOLD\";\n// number of bytes to which all host buffers are aligned\nconst std::string cosma_cpu_memory_alignment = \"COSMA_CPU_MEMORY_ALIGNMENT\";\n// IF ON, use unified memory\nconst std::string cosma_gpu_unified_memory = \"COSMA_GPU_UNIFIED_MEMORY\";\n}; // namespace env_var_names\n\n// default values of supported environment variables\nnamespace env_var_defaults {\n// number of GPU streams to be used per rank\nconst int gpu_n_streams = 2;\n// max sizes of GPU tiles (in #elements)\n// MxN corresponds to matrix C and K to the shared dimension\nconst int gpu_tile_m = 5000;\nconst int gpu_tile_n = 5000;\nconst int gpu_tile_k = 5000;\n// if ON, COSMA will try to natively use scalapack layout\n// without transformation. 
Only used in the pxgemm wrapper.\nconst bool adapt_strategy = true;\n// if ON, COSMA will try to overlap communication and computation\nconst bool overlap = false;\n// specifies the maximum available CPU memory per rank in MB\nconst long long cpu_max_memory = std::numeric_limits<long long>::max(); // inf\n// if true, local host matrices will be pinned\n// (only used when GPU backend enabled)\n// which increases the efficiency\nconst bool memory_pinning_enabled = true;\n// The scaling factor used for the memory-pool allocation size.(cpu-only).\n// If amortization = 1.2, then the memory allocator\n// will request 1.2x the requested size (thus, 20% more than needed).\n// Higher values better amortize the cost of memory buffers resizing\n// which can occur when the algorithm is invoked for different matrix sizes.\n// However, higher amortization values also mean that\n// potentially more memory is allocated than used which can be\n// a problem when the memory resource is tight.\n// There is just a single memory pool in COSMA and all the required\n// memory is taken from this memory pool only.\nconst double memory_pool_amortization = 1.2;\n// minimum local matrix size -- if P is too large, so that after\n// splitting the local matrix size get lower than this,\n// then P will be reduced so that the problem size\n// never gets smaller than specified by this variable\nconst int min_local_dimension = 200;\n// if any dimension is smaller than this threshold, it will be dispatched to\n// SCALAPACK since it's too \"thin\" for COSMA in that case\nconst int cosma_dim_threshold = 0;\n// cpu memory alignment (currently disabled)\nconst int cosma_cpu_memory_alignment = 0; // 256;\n// gpu unified memory mechanism\nconst bool unified_memory = false;\n}; // namespace env_var_defaults\n\n// checks if the specified environment variable is defined\nbool env_var_defined(const char *var_name);\n\n// checks if the environment variable with given name\n// is set to ON or OFF. 
If the variable is not defined,\n// the default value is returned\nbool get_bool_env_var(std::string name, bool default_value);\n\n// gets the value of the specified environment variable.\n// If the variable is not defined, the default value is returned\nint get_int_env_var(std::string name, int default_value);\n\n// gets the value of the specified environment variable.\n// If the variable is not defined, the default value is returned\nsize_t get_ull_env_var(std::string name, size_t default_value);\n\n// gets the value of the specified environment variable.\n// If the variable is not defined, the default value is returned\nfloat get_float_env_var(std::string name, float default_value);\n\n// gets the value of the specified environment variable.\n// If the variable is not defined, the default value is returned\ndouble get_double_env_var(std::string name, double default_value);\n\n// reads the environment variable corresponding to\n// the number of GPU streams per rank and returns\n// the default value if the variable is undefined\nint gpu_streams();\n\n// reads the environment variable corresponding to\n// the maximum tile sizes on GPU and returns\n// the default values if the variables are undefined.\n// MxN corresponds to matrix C and K to the shared dimension\nint gpu_max_tile_m();\nint gpu_max_tile_n();\nint gpu_max_tile_k();\n\n// reads the environment variable corresponding to\n// the adaptation of strategy and returns\n// the default value if the variable is undefined\nbool get_adapt_strategy();\n\n// reads the environment variable corresponding to\n// the overlap of communication and computation and returns\n// the default value if the variable is undefined\nbool get_overlap_comm_and_comp();\n\n// reads the memory pool amortization (>= 1.0).\n// If amortization = 1.2, then the memory allocator\n// will request 1.2x the requested size (thus, 20% more than needed).\n// Higher values better amortize the cost of memory buffers resizing\n// which can occur when 
the algorithm is invoked for different matrix sizes.\n// However, higher amortization values also mean that\n// potentially more memory is allocated than used which can be\n// a problem when the memory resource is tight.\ndouble get_memory_pool_amortization();\n\n// reads the environment variable corresponding to\n// the memory limit in MB per rank, converts the limit\n// to #elements that each rank is allowed to use.\n// returns the default value if the variable is undefined\ntemplate <typename T>\nlong long get_cpu_max_memory();\n\n// whether host matrix buffers should be pinned or not\n// this is only used in the GPU backend to increase\n// the transfer speed between CPU and GPU\nbool get_memory_pinning();\n\n// if, after the matrices are split among ranks,\n// any dimension becomes less than this threshold,\n// then the total number of ranks is going to be reduced\n// so that no dimension gets less than this threshold\n// after splitting.\nint get_min_local_dimension();\n\n// if initial dimension (before splitting) is less\n// than this threshold, the problem is considered too small\n// and is dispatched to SCALAPACK\n// This is only used for pxgemm wrappers.\nint get_cosma_dim_threshold();\n\n// number of bytes to which all the buffers should be aligned\nint get_cosma_cpu_memory_alignment();\n\n// check if we use unified memory or not\nbool get_unified_memory();\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/gpu/gpu_aware_mpi_utils.cpp",
    "content": "#include <complex>\n#include <iostream>\n\n#include <cosma/communicator.hpp>\n#include <cosma/gpu/utils.hpp>\n#include <cosma/gpu/gpu_aware_mpi_utils.hpp>\n#include <cosma/mpi_mapper.hpp>\n#include <cosma/profiler.hpp>\n\ntemplate <typename Scalar>\nvoid cosma::gpu::gpu_aware_mpi_copy(\n            cosma_context<Scalar> *ctx,\n            Interval &P,\n            Scalar * in, // original_matrix\n            Scalar * out,  // expanded matrix\n            Scalar *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step) {\n    PE(multiply_communication_other);\n    auto mpi_comm = ctx->get_cosma_comm()->active_comm(step);\n\n    int rank = ctx->get_cosma_comm()->rank();\n    int div = ctx->get_cosma_comm()->get_strategy().divisor(step);\n\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n\n    int relative_rank = rank - P.first();\n    int local_size = total_before[relative_rank];\n\n    int sum = 0;\n    std::vector<int> total_size(div);\n    std::vector<int> dspls(div);\n\n    std::vector<int> subgroup(div);\n    bool same_size = true;\n\n    int max_block_size = 0;\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        int temp_size = total_before[target];\n        dspls[i] = sum;\n        sum += temp_size;\n        total_size[i] = temp_size;\n        same_size &= temp_size == local_size;\n        max_block_size = std::max(max_block_size, temp_size);\n    }\n\n    int n_blocks = size_before[relative_rank].size();\n\n    // this will only resize the buffer if not already allocated\n    ctx->get_memory_pool().allocate_device_receive_buffer(max_block_size);\n    Scalar* d_send_pointer = ctx->get_memory_pool().device_receive_buffer.data();\n\n    ctx->get_memory_pool().allocate_device_send_buffer(div * max_block_size);\n    Scalar* d_receive_pointer = 
ctx->get_memory_pool().device_send_buffer.data();\n\n    auto stream = ctx->gpu_stream.stream();\n\n    // copy input matrix to device\n    gpu::copy_to_device_async(in, d_send_pointer, local_size, stream);\n\n    PL();\n\n    PE(multiply_communication_copy);\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n\n    // since it's not possible to pass the stream to MPI\n    // to perform the collective on that stream\n    // we have to make sure the data is copied to gpu\n    // before MPI function is called\n    gpu::runtime_api::stream_synchronize(stream);\n\n    MPI_Allgather(d_send_pointer,\n            max_block_size,\n            mpi_type,\n            d_receive_pointer,\n            max_block_size,\n            mpi_type,\n            mpi_comm);\n\n    PL();\n\n    PE(multiply_communication_other);\n    int index = 0;\n    std::vector<int> block_offset(div);\n    // order all first sequential parts of all groups first and so on..\n    for (int block = 0; block < n_blocks; block++) {\n        for (int rank = 0; rank < div; rank++) {\n            int target = P.locate_in_interval(div, rank, off);\n            int dsp = dspls[rank] + block_offset[rank];\n            int b_size = size_before[target][block];\n            gpu::copy_to_host_async(\n                d_receive_pointer + rank * max_block_size + block_offset[rank],\n                out + index, \n                b_size,\n                stream);\n            index += b_size;\n            block_offset[rank] += b_size;\n        }\n    }\n\n    // wait for the result on the host\n    gpu::runtime_api::stream_synchronize(stream);\n\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid cosma::gpu::gpu_aware_mpi_reduce(\n            cosma_context<Scalar> *ctx,\n            Interval &P,\n            Scalar *LC, // expanded_matrix\n            Scalar *C,  // original matrix\n            Scalar *reshuffle_buffer,\n            Scalar *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            
std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            Scalar beta,\n            size_t step,\n            bool copy_c_back) {\n    PE(multiply_communication_other);\n    auto mpi_comm = ctx->get_cosma_comm()->active_comm(step);\n\n    int rank = ctx->get_cosma_comm()->rank();\n    int div = ctx->get_cosma_comm()->get_strategy().divisor(step);\n\n    // int div = strategy_->divisor(step);\n    // MPI_Comm subcomm = active_comm(step);\n\n    std::vector<int> subgroup(div);\n\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n    // int gp, off;\n    // std::tie(gp, off) = group_and_offset(P, div);\n\n    // reorder the elements as:\n    // first all blocks that should be sent to rank 0 then all blocks for\n    // rank 1 and so on...\n    int n_blocks = c_expanded[off].size();\n    std::vector<int> block_offset(n_blocks);\n\n    int sum = 0;\n    for (int i = 0; i < n_blocks; ++i) {\n        block_offset[i] = sum;\n        sum += c_expanded[off][i];\n    }\n\n    std::vector<int> recvcnts(div);\n    int max_block_size = 0;\n    int min_block_size = recvcnts[0];\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        recvcnts[i] = c_total_current[target];\n        // the max block size (used to determine the padding)\n        max_block_size = std::max(max_block_size, recvcnts[i]);\n        min_block_size = std::min(min_block_size, recvcnts[i]);\n    }\n\n    bool same_blocks = max_block_size == min_block_size;\n\n    // here is the result of matrix multiplication on GPU\n    Scalar* d_LC = LC;\n    if (!copy_c_back) {\n        d_LC = ctx->get_gpu_context()->get_full_device_buffer_c().data();\n    }\n\n    // this will only resize the buffer if not already allocated\n    ctx->get_memory_pool().allocate_device_send_buffer(div * max_block_size);\n    Scalar* d_reshuffle_buffer = 
ctx->get_memory_pool().device_send_buffer.data();\n\n    ctx->get_memory_pool().allocate_device_receive_buffer(max_block_size);\n    Scalar* d_receive_pointer = ctx->get_memory_pool().device_receive_buffer.data();\n\n    auto stream = ctx->gpu_stream.stream();\n\n    // set all to 0s, so that we don't have to pad each block with 0s up to max_block_size\n    /*\n    if (!same_blocks) {\n        gpu::runtime_api::memset_async(d_reshuffle_buffer, 0, div * max_block_size, stream);\n    }\n    */\n\n    std::vector<int> blocks_offset_per_group(div, 0);\n    // go through the communication ring\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n\n        for (int block = 0; block < n_blocks; ++block) {\n            int b_offset = block_offset[block];\n            int b_size = c_current[target][block];\n            // reshuffle directly into the gpu buffer\n            if (!copy_c_back) {\n                gpu::copy_device_to_device_async(d_LC + b_offset, \n                                                 d_reshuffle_buffer + i * max_block_size + blocks_offset_per_group[i],\n                                                 b_size, stream);\n            } else {\n                gpu::copy_to_device_async(d_LC + b_offset, \n                                          d_reshuffle_buffer + i * max_block_size + blocks_offset_per_group[i],\n                                          b_size, stream);\n            }\n            block_offset[block] += b_size;\n            blocks_offset_per_group[i] += b_size;\n        }\n    }\n\n    Scalar *receive_pointer = beta != Scalar{0} ? 
reduce_buffer : C;\n\n    // since it's not possible to pass the stream to MPI\n    // to perform the collective on that stream\n    // we have to make sure the data is copied to gpu\n    // before MPI function is called\n    gpu::runtime_api::stream_synchronize(stream);\n\n    PL();\n\n    PE(multiply_communication_reduce);\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    MPI_Reduce_scatter_block(d_reshuffle_buffer,\n            d_receive_pointer,\n            max_block_size,\n            mpi_type,\n            MPI_SUM,\n            mpi_comm);\n    PL();\n\n    PE(multiply_communication_other);\n    gpu::copy_to_host_async(d_receive_pointer, receive_pointer, recvcnts[gp], stream);\n\n    // wait for the result on the host\n    gpu::runtime_api::stream_synchronize(stream);\n\n    if (beta != Scalar{0}) {\n        // sum up receiving_buffer with C\n        for (int el = 0; el < recvcnts[gp]; ++el) {\n            C[el] = beta * C[el] + reduce_buffer[el];\n        }\n    }\n    PL();\n}\n\n// template instantiation for gpu_aware_mpi_reduce\ntemplate void cosma::gpu::gpu_aware_mpi_reduce<float>(\n            cosma_context<float> *ctx,\n            Interval &P,\n            float *LC, // expanded_matrix\n            float *C,  // original matrix\n            float *reshuffle_buffer,\n            float *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            float beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::gpu_aware_mpi_reduce<double>(\n            cosma_context<double> *ctx,\n            Interval &P,\n            double *LC, // expanded_matrix\n            double *C,  // original matrix\n            double *reshuffle_buffer,\n            double *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            
std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            double beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::gpu_aware_mpi_reduce<std::complex<float>>(\n            cosma_context<std::complex<float>> *ctx,\n            Interval &P,\n            std::complex<float> *LC, // expanded_matrix\n            std::complex<float> *C,  // original matrix\n            std::complex<float> *reshuffle_buffer,\n            std::complex<float> *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            std::complex<float> beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::gpu_aware_mpi_reduce<std::complex<double>>(\n            cosma_context<std::complex<double>> *ctx,\n            Interval &P,\n            std::complex<double> *LC, // expanded_matrix\n            std::complex<double> *C,  // original matrix\n            std::complex<double> *reshuffle_buffer,\n            std::complex<double> *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            std::complex<double> beta,\n            size_t step,\n            bool copy_c_back);\n\n// template instantiation for gpu_aware_mpi_copy\ntemplate void cosma::gpu::gpu_aware_mpi_copy<float>(\n            cosma_context<float> *ctx,\n            Interval &P,\n            float * in, // original_matrix\n            float * out,  // expanded matrix\n            float *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int 
total_after,\n            size_t step);\n\ntemplate void cosma::gpu::gpu_aware_mpi_copy<double>(\n            cosma_context<double> *ctx,\n            Interval &P,\n            double * in, // original_matrix\n            double * out,  // expanded matrix\n            double *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\n\ntemplate void cosma::gpu::gpu_aware_mpi_copy<std::complex<float>>(\n            cosma_context<std::complex<float>> *ctx,\n            Interval &P,\n            std::complex<float> * in, // original_matrix\n            std::complex<float> * out,  // expanded matrix\n            std::complex<float> *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\n\ntemplate void cosma::gpu::gpu_aware_mpi_copy<std::complex<double>>(\n            cosma_context<std::complex<double>> *ctx,\n            Interval &P,\n            std::complex<double> * in, // original_matrix\n            std::complex<double> * out,  // expanded matrix\n            std::complex<double> *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\n\n"
  },
  {
    "path": "src/cosma/gpu/gpu_aware_mpi_utils.hpp",
    "content": "#pragma once\n#include <vector>\n\n#include <cosma/interval.hpp>\n#include <cosma/context.hpp>\n\n#include <mpi.h>\n\nnamespace cosma {\nnamespace gpu {\n    template <typename Scalar>\n    void gpu_aware_mpi_copy(\n                cosma_context<Scalar> *ctx,\n                Interval &P,\n                Scalar * in, // original_matrix\n                Scalar * out,  // expanded matrix\n                Scalar *reshuffle_buffer,\n                std::vector<std::vector<int>>& size_before,\n                std::vector<int> &total_before,\n                int total_after,\n                size_t step);\n\n    template <typename Scalar>\n    void gpu_aware_mpi_reduce(\n                cosma_context<Scalar> *ctx,\n                Interval &P,\n                Scalar *LC, // expanded_matrix\n                Scalar *C,  // original matrix\n                Scalar *reshuffle_buffer,\n                Scalar *reduce_buffer,\n                std::vector<std::vector<int>> &c_current,\n                std::vector<int> &c_total_current,\n                std::vector<std::vector<int>> &c_expanded,\n                std::vector<int> &c_total_expanded,\n                Scalar beta,\n                size_t step,\n                bool copy_c_back);\n\n}  // namespace gpu\n}  // namespace cosma\n"
  },
  {
    "path": "src/cosma/gpu/gpu_runtime_api.hpp",
    "content": "/*\n * Copyright (c) 2019 ETH Zurich, Simon Frasch\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice,\n *    this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n *    notice, this list of conditions and the following disclaimer in the\n *    documentation and/or other materials provided with the distribution.\n * 3. Neither the name of the copyright holder nor the names of its contributors\n *    may be used to endorse or promote products derived from this software\n *    without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n#pragma once\n\n#include <utility>\n\n#if defined(TILED_MM_CUDA)\n#include <cuda_runtime_api.h>\n#define GPU_PREFIX(val) cuda##val\n\n#elif defined(TILED_MM_ROCM)\n#include <hip/hip_runtime_api.h>\n#define GPU_PREFIX(val) hip##val\n\n#else\n#error Either TILED_MM_CUDA or TILED_MM_ROCM must be defined!\n#endif\n\nnamespace cosma {\nnamespace gpu {\nnamespace runtime_api {\n\nusing StatusType = GPU_PREFIX(Error_t);\nusing StreamType = GPU_PREFIX(Stream_t);\nusing EventType = GPU_PREFIX(Event_t);\n\n#ifdef TILED_MM_CUDA\nusing PointerAttributes = GPU_PREFIX(PointerAttributes);\n#else\nusing PointerAttributes = GPU_PREFIX(PointerAttribute_t);\n#endif\n\nnamespace status {\n// error / return values\nconstexpr StatusType Success = GPU_PREFIX(Success);\nconstexpr StatusType ErrorMemoryAllocation = GPU_PREFIX(ErrorMemoryAllocation);\nconstexpr StatusType ErrorLaunchOutOfResources = GPU_PREFIX(ErrorLaunchOutOfResources);\nconstexpr StatusType ErrorInvalidValue = GPU_PREFIX(ErrorInvalidValue);\nconstexpr StatusType ErrorInvalidResourceHandle = GPU_PREFIX(ErrorInvalidResourceHandle);\nconstexpr StatusType ErrorInvalidDevice = GPU_PREFIX(ErrorInvalidDevice);\nconstexpr StatusType ErrorInvalidMemcpyDirection = GPU_PREFIX(ErrorInvalidMemcpyDirection);\nconstexpr StatusType ErrorInvalidDevicePointer = GPU_PREFIX(ErrorInvalidDevicePointer);\nconstexpr StatusType ErrorInitializationError = GPU_PREFIX(ErrorInitializationError);\nconstexpr 
StatusType ErrorNoDevice = GPU_PREFIX(ErrorNoDevice);\nconstexpr StatusType ErrorNotReady = GPU_PREFIX(ErrorNotReady);\nconstexpr StatusType ErrorUnknown = GPU_PREFIX(ErrorUnknown);\nconstexpr StatusType ErrorPeerAccessNotEnabled = GPU_PREFIX(ErrorPeerAccessNotEnabled);\nconstexpr StatusType ErrorPeerAccessAlreadyEnabled = GPU_PREFIX(ErrorPeerAccessAlreadyEnabled);\nconstexpr StatusType ErrorHostMemoryAlreadyRegistered =\n    GPU_PREFIX(ErrorHostMemoryAlreadyRegistered);\nconstexpr StatusType ErrorHostMemoryNotRegistered = GPU_PREFIX(ErrorHostMemoryNotRegistered);\nconstexpr StatusType ErrorUnsupportedLimit = GPU_PREFIX(ErrorUnsupportedLimit);\n}  // namespace status\n\n// flags to pass to GPU API\nnamespace flag {\nconstexpr auto HostRegisterDefault = GPU_PREFIX(HostRegisterDefault);\nconstexpr auto HostRegisterPortable = GPU_PREFIX(HostRegisterPortable);\nconstexpr auto HostRegisterMapped = GPU_PREFIX(HostRegisterMapped);\nconstexpr auto HostRegisterIoMemory = GPU_PREFIX(HostRegisterIoMemory);\n\nconstexpr auto StreamDefault = GPU_PREFIX(StreamDefault);\nconstexpr auto StreamNonBlocking = GPU_PREFIX(StreamNonBlocking);\n\nconstexpr auto MemoryTypeHost = GPU_PREFIX(MemoryTypeHost);\nconstexpr auto MemoryTypeDevice = GPU_PREFIX(MemoryTypeDevice);\n#if (CUDART_VERSION >= 10000)\nconstexpr auto MemoryTypeUnregistered = GPU_PREFIX(MemoryTypeUnregistered);\nconstexpr auto MemoryTypeManaged = GPU_PREFIX(MemoryTypeManaged);\n#endif\n\nconstexpr auto MemcpyHostToDevice = GPU_PREFIX(MemcpyHostToDevice);\nconstexpr auto MemcpyDeviceToHost = GPU_PREFIX(MemcpyDeviceToHost);\nconstexpr auto MemcpyDeviceToDevice = GPU_PREFIX(MemcpyDeviceToDevice);\n\nconstexpr auto EventDefault = GPU_PREFIX(EventDefault);\nconstexpr auto EventBlockingSync = GPU_PREFIX(EventBlockingSync);\nconstexpr auto EventDisableTiming = GPU_PREFIX(EventDisableTiming);\nconstexpr auto EventInterprocess = GPU_PREFIX(EventInterprocess);\n}  // namespace flag\n\n// ==================================\n// 
Forwarding functions of to GPU API\n// ==================================\ntemplate <typename... ARGS>\ninline auto host_register(ARGS... args) -> StatusType {\n  return GPU_PREFIX(HostRegister)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto host_unregister(ARGS... args) -> StatusType {\n  return GPU_PREFIX(HostUnregister)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto stream_create_with_flags(ARGS... args) -> StatusType {\n  return GPU_PREFIX(StreamCreateWithFlags)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto stream_destroy(ARGS... args) -> StatusType {\n  return GPU_PREFIX(StreamDestroy)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto stream_wait_event(ARGS... args) -> StatusType {\n  return GPU_PREFIX(StreamWaitEvent)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto event_create_with_flags(ARGS... args) -> StatusType {\n  return GPU_PREFIX(EventCreateWithFlags)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto event_destroy(ARGS... args) -> StatusType {\n  return GPU_PREFIX(EventDestroy)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto event_record(ARGS... args) -> StatusType {\n  return GPU_PREFIX(EventRecord)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto event_synchronize(ARGS... args) -> StatusType {\n  return GPU_PREFIX(EventSynchronize)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto event_elapsed_time(ARGS... args) -> StatusType {\n  return GPU_PREFIX(EventElapsedTime)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto malloc(ARGS... args) -> StatusType {\n  return GPU_PREFIX(Malloc)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto host_alloc(ARGS... 
args) -> StatusType {\n#ifdef TILED_MM_CUDA\n  return cudaHostAlloc(std::forward<ARGS>(args)...);\n#else\n  // hipHostAlloc is deprecated, use hipHostMalloc instead\n  return hipHostMalloc(std::forward<ARGS>(args)...);\n#endif\n}\n\ntemplate <typename... ARGS>\ninline auto free(ARGS... args) -> StatusType {\n  return GPU_PREFIX(Free)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto memcpy(ARGS... args) -> StatusType {\n  return GPU_PREFIX(Memcpy)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto memcpy_async(ARGS... args) -> StatusType {\n  return GPU_PREFIX(MemcpyAsync)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto memcpy_2d(ARGS... args) -> StatusType {\n  return GPU_PREFIX(Memcpy2D)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto memcpy_2d_async(ARGS... args) -> StatusType {\n  return GPU_PREFIX(Memcpy2DAsync)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto get_device(ARGS... args) -> StatusType {\n  return GPU_PREFIX(GetDevice)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto set_device(ARGS... args) -> StatusType {\n  return GPU_PREFIX(SetDevice)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto get_device_count(ARGS... args) -> StatusType {\n  return GPU_PREFIX(GetDeviceCount)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto stream_synchronize(ARGS... args) -> StatusType {\n  return GPU_PREFIX(StreamSynchronize)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto memset_async(ARGS... args) -> StatusType {\n  return GPU_PREFIX(MemsetAsync)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto pointer_get_attributes(ARGS... args) -> StatusType {\n  return GPU_PREFIX(PointerGetAttributes)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto mem_get_info(ARGS... 
args) -> StatusType {\n  return GPU_PREFIX(MemGetInfo)(std::forward<ARGS>(args)...);\n}\n\ntemplate <typename... ARGS>\ninline auto get_error_string(ARGS... args) -> const char* {\n  return GPU_PREFIX(GetErrorString)(std::forward<ARGS>(args)...);\n}\n\ninline auto get_last_error() -> StatusType { return GPU_PREFIX(GetLastError)(); }\n\ninline auto device_synchronize() -> StatusType { return GPU_PREFIX(DeviceSynchronize)(); }\n\n}  // namespace runtime_api\n}  // namespace gpu\n}  // namespace cosma\n"
  },
  {
    "path": "src/cosma/gpu/nccl_mapper.hpp",
    "content": "#pragma once\n\n#include <complex>\n\n#if defined(TILED_MM_CUDA)\n#include <nccl.h>\n\n#elif defined(TILED_MM_ROCM)\n#include <rccl/rccl.h>\n\n#else\n#error Either TILED_MM_CUDA or TILED_MM_ROCM must be defined!\n#endif\n\nnamespace cosma {\nnamespace gpu {\n/**\n * Maps a primitive numeric type to a NCCL data type.\n *\n * @tparam Scalar the numeric type to be mapped\n */\ntemplate <typename Scalar>\nstruct nccl_mapper {\n  static inline ncclDataType_t getType();\n};\n\ntemplate <>\ninline ncclDataType_t nccl_mapper<double>::getType() {\n  return ncclDouble;\n}\n\ntemplate <>\ninline ncclDataType_t nccl_mapper<float>::getType() {\n  return ncclFloat;\n}\n\n// Complex types map to the underlying real type;\n// callers must double the element count accordingly.\ntemplate <>\ninline ncclDataType_t nccl_mapper<std::complex<double>>::getType() {\n  return ncclDouble;\n}\n\ntemplate <>\ninline ncclDataType_t nccl_mapper<std::complex<float>>::getType() {\n  return ncclFloat;\n}\n\n// Removes const qualifier\n//\ntemplate <typename Scalar>\nstruct nccl_mapper<const Scalar> {\n  static inline ncclDataType_t getType();\n};\n\ntemplate <typename Scalar>\ninline ncclDataType_t nccl_mapper<const Scalar>::getType() {\n  return nccl_mapper<Scalar>::getType();\n}\n\n} // end namespace gpu\n} // end namespace cosma\n"
  },
  {
    "path": "src/cosma/gpu/nccl_utils.cpp",
    "content": "#include <iostream>\n\n#include <cosma/communicator.hpp>\n#include <cosma/gpu/utils.hpp>\n#include <cosma/gpu/nccl_utils.hpp>\n#include <cosma/gpu/nccl_mapper.hpp>\n#include <cosma/profiler.hpp>\n\nvoid cosma::gpu::free_nccl_comm(ncclComm_t nccl_comm) {\n    auto status = ncclCommDestroy(nccl_comm);\n    check_nccl_status(status);\n}\n\nvoid cosma::gpu::check_nccl_status(ncclResult_t result) {\n    if (result != ncclSuccess) {\n        std::cerr << \"[NCCL ERROR]: \" << ncclGetErrorString(result) << std::endl;\n        throw(std::runtime_error(\"NCCL ERROR\"));\n    }\n}\n\nncclComm_t cosma::gpu::mpi_to_nccl_comm(MPI_Comm comm) {\n    if (comm == MPI_COMM_NULL) {\n        return nullptr;\n    }\n    int my_rank, n_ranks;\n    MPI_Comm_rank(comm, &my_rank);\n    MPI_Comm_size(comm, &n_ranks);\n\n    ncclUniqueId id;\n    if (my_rank == 0) {\n        auto status = ncclGetUniqueId(&id);\n        check_nccl_status(status);\n    }\n\n    MPI_Bcast(&id, sizeof(id), MPI_BYTE, 0, comm);\n\n    ncclComm_t nccl_comm;\n    auto status = ncclCommInitRank(&nccl_comm, n_ranks, id, my_rank);\n    check_nccl_status(status);\n\n    return nccl_comm;\n}\n\ntemplate <typename Scalar>\nvoid cosma::gpu::nccl_copy(\n            cosma_context<Scalar> *ctx,\n            Interval &P,\n            Scalar * in, // original_matrix\n            Scalar * out,  // expanded matrix\n            Scalar *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step) {\n    PE(multiply_communication_other);\n    auto mpi_comm = ctx->get_cosma_comm()->active_comm(step);\n    auto nccl_comm = ctx->get_cosma_comm()->active_nccl_comm(step);\n\n    int rank = ctx->get_cosma_comm()->rank();\n    int div = ctx->get_cosma_comm()->get_strategy().divisor(step);\n\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n\n    int relative_rank = rank - 
P.first();\n    int local_size = total_before[relative_rank];\n\n    int sum = 0;\n    std::vector<int> total_size(div);\n    std::vector<int> dspls(div);\n\n    std::vector<int> subgroup(div);\n    bool same_size = true;\n\n    int max_block_size = 0;\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        int temp_size = total_before[target];\n        dspls[i] = sum;\n        sum += temp_size;\n        total_size[i] = temp_size;\n        same_size &= temp_size == local_size;\n        max_block_size = std::max(max_block_size, temp_size);\n    }\n\n    int n_blocks = size_before[relative_rank].size();\n\n    // this will only resize the buffer if not already allocated\n    ctx->get_memory_pool().allocate_device_receive_buffer(max_block_size);\n    Scalar* d_send_pointer = ctx->get_memory_pool().device_receive_buffer.data();\n\n    ctx->get_memory_pool().allocate_device_send_buffer(div * max_block_size);\n    Scalar* d_receive_pointer = ctx->get_memory_pool().device_send_buffer.data();\n\n    auto stream = ctx->gpu_stream.stream();\n\n    // copy input matrix to device\n    gpu::copy_to_device_async(in, d_send_pointer, local_size, stream);\n\n    PL();\n\n    PE(multiply_communication_copy);\n    auto nccl_type = nccl_mapper<Scalar>::getType();\n    int block_size = max_block_size;\n    if (is_complex<Scalar>()) {\n        block_size *= 2;\n    }\n\n    ncclAllGather(reinterpret_cast<void*>(d_send_pointer),\n            reinterpret_cast<void*>(d_receive_pointer),\n            block_size,\n            nccl_type,\n            nccl_comm,\n            stream);\n\n    int index = 0;\n    std::vector<int> block_offset(div);\n    // order all first sequential parts of all groups first and so on..\n    for (int block = 0; block < n_blocks; block++) {\n        for (int rank = 0; rank < div; rank++) {\n            int target = P.locate_in_interval(div, rank, off);\n            int dsp = dspls[rank] + block_offset[rank];\n         
   int b_size = size_before[target][block];\n            gpu::copy_to_host_async(\n                d_receive_pointer + rank * max_block_size + block_offset[rank],\n                out + index, \n                b_size,\n                stream);\n            index += b_size;\n            block_offset[rank] += b_size;\n        }\n    }\n\n    // wait for the result on the host\n    gpu::runtime_api::stream_synchronize(stream);\n\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid cosma::gpu::nccl_reduce(\n            cosma_context<Scalar> *ctx,\n            Interval &P,\n            Scalar *LC, // expanded_matrix\n            Scalar *C,  // original matrix\n            Scalar *reshuffle_buffer,\n            Scalar *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            Scalar beta,\n            size_t step,\n            bool copy_c_back) {\n    PE(multiply_communication_other);\n    auto mpi_comm = ctx->get_cosma_comm()->active_comm(step);\n    auto nccl_comm = ctx->get_cosma_comm()->active_nccl_comm(step);\n\n    int rank = ctx->get_cosma_comm()->rank();\n    int div = ctx->get_cosma_comm()->get_strategy().divisor(step);\n\n    // int div = strategy_->divisor(step);\n    // MPI_Comm subcomm = active_comm(step);\n\n    std::vector<int> subgroup(div);\n\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n    // int gp, off;\n    // std::tie(gp, off) = group_and_offset(P, div);\n\n    // reorder the elements as:\n    // first all blocks that should be sent to rank 0 then all blocks for\n    // rank 1 and so on...\n    int n_blocks = c_expanded[off].size();\n    std::vector<int> block_offset(n_blocks);\n\n    int sum = 0;\n    for (int i = 0; i < n_blocks; ++i) {\n        block_offset[i] = sum;\n        sum += c_expanded[off][i];\n    }\n\n    std::vector<int> 
recvcnts(div);\n    int max_block_size = 0;\n    int min_block_size = recvcnts[0];\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        recvcnts[i] = c_total_current[target];\n        // the max block size (used to determine the padding)\n        max_block_size = std::max(max_block_size, recvcnts[i]);\n        min_block_size = std::min(min_block_size, recvcnts[i]);\n    }\n\n    bool same_blocks = max_block_size == min_block_size;\n\n    // here is the result of matrix multiplication on GPU\n    Scalar* d_LC = LC;\n    if (!copy_c_back) {\n        d_LC = ctx->get_gpu_context()->get_full_device_buffer_c().data();\n    }\n\n    // this will only resize the buffer if not already allocated\n    ctx->get_memory_pool().allocate_device_send_buffer(div * max_block_size);\n    Scalar* d_reshuffle_buffer = ctx->get_memory_pool().device_send_buffer.data();\n\n    ctx->get_memory_pool().allocate_device_receive_buffer(max_block_size);\n    Scalar* d_receive_pointer = ctx->get_memory_pool().device_receive_buffer.data();\n\n    auto stream = ctx->gpu_stream.stream();\n\n    // set all to 0s, so that we don't have to pad each block with 0s up to max_block_size\n    /*\n    if (!same_blocks) {\n        gpu::runtime_api::memset_async(d_reshuffle_buffer, 0, div * max_block_size, stream);\n    }\n    */\n\n    std::vector<int> blocks_offset_per_group(div, 0);\n    // go through the communication ring\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n\n        for (int block = 0; block < n_blocks; ++block) {\n            int b_offset = block_offset[block];\n            int b_size = c_current[target][block];\n            // reshuffle directly into the gpu buffer\n            if (!copy_c_back) {\n                gpu::copy_device_to_device_async(d_LC + b_offset, \n                                                 d_reshuffle_buffer + i * max_block_size + blocks_offset_per_group[i],\n              
                                   b_size, stream);\n            } else {\n                gpu::copy_to_device_async(d_LC + b_offset, \n                                          d_reshuffle_buffer + i * max_block_size + blocks_offset_per_group[i],\n                                          b_size, stream);\n            }\n            // pad with 0s if not all the blocks are the same\n            // padding is not necessary because the array is initialized to 0s above\n            // in a single kernel\n            /*\n            if (b_size < max_block_size) {\n                gpu::runtime_api::memset_async(d_reshuffle_buffer + index + b_size, 0, max_block_size - b_size);\n            }\n            */\n            block_offset[block] += b_size;\n            blocks_offset_per_group[i] += b_size;\n        }\n    }\n\n    Scalar *receive_pointer = beta != Scalar{0} ? reduce_buffer : C;\n    PL();\n\n    PE(multiply_communication_reduce);\n    auto nccl_type = nccl_mapper<Scalar>::getType();\n    int block_size = max_block_size;\n    if (is_complex<Scalar>()) {\n        block_size *= 2;\n    }\n    ncclReduceScatter(reinterpret_cast<void*>(d_reshuffle_buffer),\n            reinterpret_cast<void*>(d_receive_pointer),\n            block_size,\n            nccl_type,\n            ncclSum,\n            nccl_comm,\n            stream);\n    gpu::copy_to_host_async(d_receive_pointer, receive_pointer, recvcnts[gp], stream);\n\n    // wait for the result on the host\n    gpu::runtime_api::stream_synchronize(stream);\n    PL();\n\n    PE(multiply_communication_other);\n    if (beta != Scalar{0}) {\n        // sum up receiving_buffer with C\n        for (int el = 0; el < recvcnts[gp]; ++el) {\n            C[el] = beta * C[el] + reduce_buffer[el];\n        }\n    }\n    PL();\n}\n\n// template instantiation for nccl_reduce\ntemplate void cosma::gpu::nccl_reduce<float>(\n            cosma_context<float> *ctx,\n            Interval &P,\n            float *LC, // expanded_matrix\n  
          float *C,  // original matrix\n            float *reshuffle_buffer,\n            float *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            float beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::nccl_reduce<double>(\n            cosma_context<double> *ctx,\n            Interval &P,\n            double *LC, // expanded_matrix\n            double *C,  // original matrix\n            double *reshuffle_buffer,\n            double *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            double beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::nccl_reduce<std::complex<float>>(\n            cosma_context<std::complex<float>> *ctx,\n            Interval &P,\n            std::complex<float> *LC, // expanded_matrix\n            std::complex<float> *C,  // original matrix\n            std::complex<float> *reshuffle_buffer,\n            std::complex<float> *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            std::complex<float> beta,\n            size_t step,\n            bool copy_c_back);\n\ntemplate void cosma::gpu::nccl_reduce<std::complex<double>>(\n            cosma_context<std::complex<double>> *ctx,\n            Interval &P,\n            std::complex<double> *LC, // expanded_matrix\n            std::complex<double> *C,  // original matrix\n            std::complex<double> *reshuffle_buffer,\n            std::complex<double> 
*reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            std::complex<double> beta,\n            size_t step,\n            bool copy_c_back);\n\n// template instantiation for nccl_copy\ntemplate void cosma::gpu::nccl_copy<float>(\n            cosma_context<float> *ctx,\n            Interval &P,\n            float * in, // original_matrix\n            float * out,  // expanded matrix\n            float *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\ntemplate void cosma::gpu::nccl_copy<double>(\n            cosma_context<double> *ctx,\n            Interval &P,\n            double * in, // original_matrix\n            double * out,  // expanded matrix\n            double *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\ntemplate void cosma::gpu::nccl_copy<std::complex<float>>(\n            cosma_context<std::complex<float>> *ctx,\n            Interval &P,\n            std::complex<float> * in, // original_matrix\n            std::complex<float> * out,  // expanded matrix\n            std::complex<float> *reshuffle_buffer,\n            std::vector<std::vector<int>>& size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\n\ntemplate void cosma::gpu::nccl_copy<std::complex<double>>(\n            cosma_context<std::complex<double>> *ctx,\n            Interval &P,\n            std::complex<double> * in, // original_matrix\n            std::complex<double> * out,  // expanded matrix\n            std::complex<double> *reshuffle_buffer,\n            std::vector<std::vector<int>>& 
size_before,\n            std::vector<int> &total_before,\n            int total_after,\n            size_t step);\n"
  },
  {
    "path": "src/cosma/gpu/nccl_utils.hpp",
    "content": "#pragma once\n#include <vector>\n\n#include <cosma/interval.hpp>\n#include <cosma/context.hpp>\n\n#include <mpi.h>\n\n#if defined(TILED_MM_CUDA)\n#include <nccl.h>\n\n#elif defined(TILED_MM_ROCM)\n#include <rccl/rccl.h>\n\n#else\n#error Either TILED_MM_CUDA or TILED_MM_ROCM must be defined!\n#endif\n\nnamespace cosma {\nnamespace gpu {\n    void check_nccl_status(ncclResult_t result);\n\n    ncclComm_t mpi_to_nccl_comm(MPI_Comm comm);\n\n    void free_nccl_comm(ncclComm_t nccl_comm);\n\n    template <typename Scalar>\n    void nccl_copy(\n                cosma_context<Scalar> *ctx,\n                Interval &P,\n                Scalar * in, // original_matrix\n                Scalar * out,  // expanded matrix\n                Scalar *reshuffle_buffer,\n                std::vector<std::vector<int>>& size_before,\n                std::vector<int> &total_before,\n                int total_after,\n                size_t step);\n\n    template <typename Scalar>\n    void nccl_reduce(\n                cosma_context<Scalar> *ctx,\n                Interval &P,\n                Scalar *LC, // expanded_matrix\n                Scalar *C,  // original matrix\n                Scalar *reshuffle_buffer,\n                Scalar *reduce_buffer,\n                std::vector<std::vector<int>> &c_current,\n                std::vector<int> &c_total_current,\n                std::vector<std::vector<int>> &c_expanded,\n                std::vector<int> &c_total_expanded,\n                Scalar beta,\n                size_t step,\n                bool copy_c_back);\n\n}  // namespace gpu\n}  // namespace cosma\n"
  },
  {
    "path": "src/cosma/gpu/utils.hpp",
    "content": "#pragma once\n\n#include <iostream>\n#include <stdexcept>\n\n#include <cosma/gpu/gpu_runtime_api.hpp>\n\nnamespace cosma {\nnamespace gpu {\n    // Throws std::runtime_error if the given GPU runtime status is not Success.\n    // Marked inline: a non-template function defined in a header must be inline\n    // to avoid ODR violations when the header is included in multiple TUs.\n    inline void check_runtime_status(runtime_api::StatusType status) {\n        if(status !=  runtime_api::status::Success) {\n            std::cerr << \"error: GPU API call : \"\n            << runtime_api::get_error_string(status) << std::endl;\n            throw(std::runtime_error(\"GPU ERROR\"));\n        }\n    }\n\n    // copy n*T from host to device\n    // If a cuda stream is passed as the final argument the copy will be performed\n    // asynchronously in the specified stream, otherwise it will be serialized in\n    // the default (NULL) stream\n    template <typename T>\n    void copy_to_device_async(const T* from, T* to, size_t n, runtime_api::StreamType stream=NULL) {\n        auto status = runtime_api::memcpy_async(to, from, n * sizeof(T),\n                runtime_api::flag::MemcpyHostToDevice, stream);\n        check_runtime_status(status);\n    }\n\n    // copy n*T from device to host\n    // If a cuda stream is passed as the final argument the copy will be performed\n    // asynchronously in the specified stream, otherwise it will be serialized in\n    // the default (NULL) stream\n    template <typename T>\n    void copy_to_host_async(const T* from, T* to, size_t n, runtime_api::StreamType stream=NULL) {\n        auto status = runtime_api::memcpy_async(to, from, n * sizeof(T),\n                                                runtime_api::flag::MemcpyDeviceToHost, stream);\n        check_runtime_status(status);\n    }\n\n    // copy n*T from device to device\n    template <typename T>\n    void copy_device_to_device_async(const T* from, T* to, size_t n, runtime_api::StreamType stream=NULL) {\n        auto status = runtime_api::memcpy_async(to, from, n * sizeof(T),\n                                                runtime_api::flag::MemcpyDeviceToDevice, stream);\n        check_runtime_status(status);\n    }\n\n\n} // gpu\n} // cosma\n"
  },
  {
    "path": "src/cosma/interpose.h",
    "content": "/*\n * taken from: https://github.com/helixhorned/interpose/tree/feature/c-header-and-example-logger\n * which is forked from: https://github.com/ccurtsinger/interpose\n *\n * MIT License\n\n   Copyright (c) 2017 Charlie Curtsinger\n\n   Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n   The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n   THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n */\n\n#if !defined(__INTERPOSE_H)\n#define __INTERPOSE_H\n\n#include <stdint.h>\n\n#include <dlfcn.h>\n\n#if defined(__ELF__)\n\n#if !defined _GNU_SOURCE\n# error Must define _GNU_SOURCE for RTLD_NEXT to be available.\n#endif\n\n// The C version of the interposition macro differs from that of the C++ version: the user\n// has to provide the argument type-and-name list as a macro argument (instead of postfixing\n// it to the macro invocation), and to redundantly provide the list of just argument names,\n// which must match those in the type-and-name list.\n#define INTERPOSE__C_GENERIC__(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, ...) 
\\\n  static RETURN_TYPE Real__##NAME ARG_TYPE_AND_NAME_LIST { \\\n    static __typeof__(NAME)* real_##NAME; \\\n    __typeof__(NAME)* func = __atomic_load_n(&real_##NAME, __ATOMIC_CONSUME); \\\n    if(!func) { \\\n      func = (__typeof__(NAME)*)( \\\n        (uintptr_t)(dlsym(RTLD_NEXT, #NAME))); \\\n      __atomic_store_n(&real_##NAME, func, __ATOMIC_RELEASE); \\\n    } \\\n    __VA_ARGS__; \\\n  } \\\n  extern __typeof__(NAME) NAME __attribute__((weak, alias(\"__interpose_\" #NAME))); \\\n  extern RETURN_TYPE __interpose_##NAME ARG_TYPE_AND_NAME_LIST\n\n#define INTERPOSE_C(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, ARG_NAME_LIST) \\\n  INTERPOSE__C_GENERIC__(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, return func ARG_NAME_LIST)\n\n#define INTERPOSE_C_VOID(NAME, ARG_TYPE_AND_NAME_LIST, ARG_NAME_LIST) \\\n  INTERPOSE__C_GENERIC__(void, NAME, ARG_TYPE_AND_NAME_LIST, func ARG_NAME_LIST)\n\n#elif defined(__APPLE__)\n\n/// Structure exposed to the linker for interposition\nstruct __osx_interpose {\n\tconst void* new_func;\n\tconst void* orig_func;\n};\n\n/**\n * Generate a macOS interpose struct\n * Types from: http://opensource.apple.com/source/dyld/dyld-210.2.3/include/mach-o/dyld-interposing.h\n */\n#define OSX_INTERPOSE_STRUCT(NEW, OLD) \\\n  static const struct __osx_interpose __osx_interpose_##OLD \\\n    __attribute__((used, section(\"__DATA, __interpose\"))) = \\\n    { (const void*)((uintptr_t)(&(NEW))), \\\n      (const void*)((uintptr_t)(&(OLD))) }\n\n/**\n  * The OSX interposition process is much simpler. Just create an OSX interpose struct,\n  * include the actual function in the `real` namespace, and declare the beginning of the\n  * replacement function with the appropriate return type.\n  */\n#define INTERPOSE__C_GENERIC__(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, ...) 
\\\n  static RETURN_TYPE Real__##NAME ARG_TYPE_AND_NAME_LIST { \\\n    __VA_ARGS__; \\\n  } \\\n  extern RETURN_TYPE __interpose_##NAME ARG_TYPE_AND_NAME_LIST; \\\n  OSX_INTERPOSE_STRUCT(__interpose_##NAME, NAME); \\\n  extern RETURN_TYPE __interpose_##NAME ARG_TYPE_AND_NAME_LIST\n\n#define INTERPOSE_C(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, ARG_NAME_LIST) \\\n  INTERPOSE__C_GENERIC__(RETURN_TYPE, NAME, ARG_TYPE_AND_NAME_LIST, return NAME ARG_NAME_LIST)\n\n#define INTERPOSE_C_VOID(NAME, ARG_TYPE_AND_NAME_LIST, ARG_NAME_LIST) \\\n  INTERPOSE__C_GENERIC__(void, NAME, ARG_TYPE_AND_NAME_LIST, NAME ARG_NAME_LIST)\n\n#else\n# error Unsupported platform.\n#endif\n\n#endif\n"
  },
  {
    "path": "src/cosma/interval.cpp",
    "content": "#include <cosma/interval.hpp>\n\nnamespace cosma {\n// interval of consecutive numbers\nInterval::Interval() = default;\n\nInterval::Interval(int start, int end)\n    : start_(start)\n    , end_(end) {\n    if (start < 0 || end < 0) {\n        throw std::runtime_error(\n            \"ERROR: in class interval (COSMA): start, end > 0 must be satisfied.\");\n    }\n\n    if (start > end) {\n        throw std::runtime_error(\n            \"ERROR: in class interval (COSMA): start<=end must be satisfied.\");\n    }\n}\n\nint Interval::first() const { return start_; }\n\nint Interval::last() const { return end_; }\n\nstd::size_t Interval::length() { return end_ - start_ + 1; }\n\nbool Interval::empty() { return start_ == end_; }\n\nbool Interval::only_one() { return length() == 1; }\n\n// divides the interval into intervals of equal length.\n// if the interval is not divisible by divisor, then\n// last interval might not be of the same size as others.\nstd::vector<Interval> Interval::divide_by(int divisor) {\n    if (length() < divisor) {\n        return {*this};\n    }\n\n    std::vector<Interval> divided(divisor);\n\n    for (int i = 0; i < divisor; i++) {\n        divided[i] = subinterval(divisor, i);\n    }\n\n    return divided;\n}\n\nint Interval::subinterval_index(int divisor, int elem) {\n    int subset_size = length() / divisor;\n    int relative = elem - first();\n    int subint_index = relative / subset_size;\n    return subint_index;\n}\n\nint Interval::subinterval_offset(int divisor, int elem) {\n    int subset_size = length() / divisor;\n    int relative = elem - first();\n    int subint_index = relative / subset_size;\n    int offset = relative - subint_index * subset_size;\n    return offset;\n}\n\nstd::pair<int, int> Interval::locate_in_subinterval(int divisor, int elem) {\n    int subset_size = length() / divisor;\n    int relative = elem - first();\n    int subint_index = relative / subset_size;\n    int offset = relative - subint_index 
* subset_size;\n    return {subint_index, offset};\n}\n\nint Interval::locate_in_interval(int divisor,\n                                 int subint_index,\n                                 int subint_offset) {\n    int subset_size = length() / divisor;\n    return subint_index * subset_size + subint_offset;\n}\n\n// returns the interval containing elem\nInterval Interval::subinterval_containing(int divisor, int elem) {\n    return subinterval(divisor, subinterval_index(divisor, elem));\n}\n\n// returns the box_index-th interval\nInterval Interval::subinterval(int divisor, int box_index) {\n    if (length() < divisor) {\n        return {*this};\n    }\n\n    // this will interleave smaller and bigger intervals\n    int start = length() * box_index / divisor;\n    int end = length() * (box_index + 1) / divisor - 1;\n\n    // alternative that will first have all the bigger intervals and then the\n    // smaller ones int interval_length = length() / divisor + (box_index <=\n    // length() % divisor ? 1 : 0)\n\n    return Interval(start_ + start, start_ + end);\n}\n\nint Interval::largest_subinterval_length(int divisor) {\n    return length() / divisor + (length() % divisor == 0 ? 
0 : 1);\n}\n\nint Interval::smallest_subinterval_length(int divisor) {\n    return length() / divisor;\n}\n\nstd::ostream &operator<<(std::ostream &os, const Interval &inter) {\n    os << '[' << inter.start_ << \", \" << inter.end_ << ']';\n    return os;\n}\n\nbool Interval::contains(int num) { return num >= first() && num <= last(); }\n\nbool Interval::contains(Interval other) {\n    return first() <= other.first() && last() >= other.last();\n}\n\nbool Interval::before(Interval &other) const { \n    return last() < other.first(); \n}\n\nbool Interval::operator==(const Interval &other) const {\n    return start_ == other.start_ && end_ == other.end_;\n}\n\nInterval2D::Interval2D() = default;\nInterval2D::Interval2D(Interval row, Interval col)\n    : rows(row)\n    , cols(col) {}\n\nInterval2D::Interval2D(int row_start, int row_end, int col_start, int col_end) {\n    rows = Interval(row_start, row_end);\n    cols = Interval(col_start, col_end);\n}\n\n// splits the current Interval2D into divisor many submatrices by splitting\n// only the columns interval and returns the size of the submatrix indexed with\n// index\nstd::size_t Interval2D::split_by(int divisor, int index) {\n    if (index >= divisor) {\n        std::cout << \"Error in Interval2D.split_by: trying to access \" << index\n                  << \"-subinterval, out of \" << divisor\n                  << \" total subintervals\\n\";\n        return -1;\n    }\n\n    if (cols.length() < 0 || cols.length() < divisor) {\n        std::cout << \"Error in Interval2D.split_by: trying to divide the \"\n                     \"subinterval of length \"\n                  << cols.length() << \" into \" << divisor\n                  << \" many subintervals\\n\";\n        return -1;\n    }\n\n    return rows.length() * cols.subinterval(divisor, index).length();\n}\n\nstd::size_t Interval2D::size() {\n    auto size = split_by(1, 0);\n    return size;\n}\n\nbool Interval2D::contains(int row, int col) {\n    return 
rows.contains(row) && cols.contains(col);\n}\n\nbool Interval2D::contains(Interval2D other) {\n    return rows.contains(other.rows) && cols.contains(other.cols);\n}\n\nbool Interval2D::before(Interval2D &other) const {\n    return (rows.before(other.rows) && other.cols.contains(cols)) ||\n           (cols.before(other.cols) && other.rows.contains(rows));\n    // return (rows.before(other.rows))|| (cols.before(other.cols));\n}\n\nint Interval2D::local_index(int row, int col) {\n    if (!contains(row, col)) {\n        return -1;\n    }\n    row -= rows.first();\n    col -= cols.first();\n    return col * rows.length() + row;\n}\n\nstd::pair<int, int> Interval2D::global_index(int local_index) {\n    int x, y;\n    x = rows.first() + local_index % rows.length();\n    y = cols.first() + local_index / rows.length();\n    return {x, y};\n}\n\nInterval2D Interval2D::submatrix(int divisor, int index) {\n    return Interval2D(rows, cols.subinterval(divisor, index));\n}\n\nbool Interval2D::operator==(const Interval2D &other) const {\n    return (rows == other.rows) && (cols == other.cols);\n}\n\nstd::ostream &operator<<(std::ostream &os, const Interval2D &inter) {\n    os << \"rows \" << inter.rows << \"; columns: \" << inter.cols;\n    return os;\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/interval.hpp",
    "content": "#pragma once\n#include <iostream>\n#include <vector>\n\nnamespace cosma {\n// interval of consecutive numbers\nclass Interval {\n  public:\n    int start_;\n    int end_;\n\n    Interval();\n    Interval(int start, int end);\n\n    int first() const;\n    int last() const;\n\n    std::size_t length();\n    bool empty();\n    bool only_one();\n\n    // divides the interval into intervals of equal length.\n    // if the interval is not divisible by divisor, then\n    // last interval might not be of the same size as others.\n    std::vector<Interval> divide_by(int divisor);\n\n    int subinterval_index(int divisor, int elem);\n    int subinterval_offset(int divisor, int elem);\n    std::pair<int, int> locate_in_subinterval(int divisor, int elem);\n    int locate_in_interval(int divisor, int subint_index, int subint_offset);\n\n    // returns the interval containing elem\n    Interval subinterval_containing(int divisor, int elem);\n\n    // returns the box_index-th interval\n    Interval subinterval(int divisor, int box_index);\n\n    // returns the largest subinterval when divided by divisor\n    int largest_subinterval_length(int divisor);\n\n    // returns the smallest subinterval when divided by divisor\n    int smallest_subinterval_length(int divisor);\n\n    bool contains(int num);\n    bool contains(Interval other);\n    bool before(Interval &other) const;\n\n    bool operator==(const Interval &other) const;\n\n    friend std::ostream &operator<<(std::ostream &os, const Interval &inter);\n};\n\nclass Interval2D {\n  public:\n    Interval rows;\n    Interval cols;\n\n    Interval2D();\n    Interval2D(Interval row, Interval col);\n    Interval2D(int row_start, int row_end, int col_start, int col_end);\n\n    // splits the current Interval2D into divisor many submatrices by splitting\n    // only the columns interval and returns the size of the submatrix indexed\n    // with index\n    std::size_t split_by(int divisor, int index);\n\n    
std::size_t size();\n\n    bool contains(int row, int col);\n    bool contains(Interval2D other);\n    bool before(Interval2D &other) const;\n\n    int local_index(int row, int col);\n    std::pair<int, int> global_index(int local_index);\n\n    Interval2D submatrix(int divisor, int index);\n\n    bool operator==(const Interval2D &other) const;\n    friend std::ostream &operator<<(std::ostream &os, const Interval2D &inter);\n};\n} // namespace cosma\n\ntemplate <class T>\ninline void hash_combine(std::size_t &s, const T &v) {\n    std::hash<T> h;\n    s ^= h(v) + 0x9e3779b9 + (s << 6) + (s >> 2);\n}\n\n// add hash function specialization for these struct-s\n// so that we can use this class as a key of the unordered_map\nnamespace std {\ntemplate <>\nstruct hash<cosma::Interval> {\n    std::size_t operator()(const cosma::Interval &k) const {\n        using std::hash;\n\n        // Compute individual hash values for first,\n        // second and third and combine them using XOR\n        // and bit shifting:\n        size_t result = 0;\n        hash_combine(result, k.start_);\n        hash_combine(result, k.end_);\n        return result;\n    }\n};\n\ntemplate <>\nstruct hash<cosma::Interval2D> {\n    std::size_t operator()(const cosma::Interval2D &k) const {\n        using std::hash;\n\n        // Compute individual hash values for first,\n        // second and third and combine them using XOR\n        // and bit shifting:\n        size_t result = 0;\n        hash_combine(result, k.rows);\n        hash_combine(result, k.cols);\n        return result;\n    }\n};\n} // namespace std\n"
  },
  {
    "path": "src/cosma/layout.cpp",
    "content": "#include <cosma/layout.hpp>\n#include <cosma/profiler.hpp>\n\nnamespace cosma {\nLayout::Layout(Mapper* mapper):\n    label_(mapper->label()),\n    m_(mapper->m()),\n    n_(mapper->n()),\n    P_(mapper->P()),\n    rank_(mapper->rank()),\n    mapper_(mapper) {\n    PE(preprocessing_matrices_layout);\n    initial_size_ = std::vector<int>(P_);\n    bucket_size_ = std::vector<std::vector<int>>(P_, std::vector<int>());\n    pointer_ = std::vector<int>(P_);\n\n    for (size_t p = 0; p < P_; ++p) {\n        int sum = 0;\n        auto ranges = mapper_->initial_layout(p);\n\n        for (size_t bucket = 0; bucket < ranges.size(); ++bucket) {\n            int size = ranges[bucket].size();\n            bucket_size_[p].push_back(size);\n            sum += size;\n        }\n        initial_size_[p] = sum;\n    }\n    PL();\n}\n\nint Layout::size() { \n    return size(rank_); \n}\n\nint Layout::size(int rank) { \n    if (rank < P_)\n        return bucket_size_[rank][pointer_[rank]]; \n    return 0;\n}\n\n// we cannot use the precomputed bucket_offset_ here, since\n// the buckets might have increased due to communication\n// if parallel and sequential steps are interleaved\nint Layout::offset(int rank, int prev_pointer) {\n    int sum = 0;\n\n    for (int pointer = prev_pointer; pointer < pointer_[rank]; ++pointer) {\n        sum += bucket_size_[rank][pointer];\n    }\n\n    return sum;\n}\n\nint Layout::offset(int prev_pointer) { return offset(rank_, prev_pointer); }\n\nvoid Layout::next(int rank) { pointer_[rank]++; }\n\nvoid Layout::next() {\n    // move the pointer to the next range that this rank owns\n    // and put its size in buffer_size_[rank_]\n    next(rank_);\n}\n\nvoid Layout::prev(int rank) { pointer_[rank]--; }\n\nvoid Layout::prev() { prev(rank_); }\n\nstd::vector<int> Layout::seq_buckets(Interval &newP) {\n    std::vector<int> result(newP.length());\n    for (int i = newP.first(); i <= newP.last(); ++i) {\n        result[i - newP.first()] = 
pointer_[i];\n    }\n    return result;\n}\n\nvoid Layout::set_seq_buckets(Interval &newP, std::vector<int> &pointers) {\n    for (int i = newP.first(); i <= newP.last(); ++i) {\n        pointer_[i] = pointers[i - newP.first()];\n    }\n}\n\nint Layout::seq_bucket(int rank) { return pointer_[rank]; }\n\nint Layout::seq_bucket() { return seq_bucket(rank_); }\n\nvoid Layout::update_buckets(Interval &P, Interval2D &range) {\n    for (int rank = P.first(); rank <= P.last(); ++rank) {\n        int pointer = pointer_[rank];\n        auto &ranges = mapper_->initial_layout(rank);\n\n        while (pointer < ranges.size() && ranges[pointer].before(range)) {\n            next(rank);\n            pointer++;\n        }\n    }\n}\n\nvoid Layout::buffers_before_expansion(\n    Interval &P,\n    Interval2D &range,\n    std::vector<std::vector<int>> &size_per_rank,\n    std::vector<int> &total_size_per_rank) {\n\n    for (int i = P.first(); i <= P.last(); ++i) {\n        size_per_rank[i - P.first()] =\n            sizes_inside_range(range, i, total_size_per_rank[i - P.first()]);\n    }\n}\n\nvoid Layout::buffers_after_expansion(\n    Interval &P,\n    Interval &newP,\n    std::vector<std::vector<int>> &size_per_rank,\n    std::vector<int> &total_size_per_rank,\n    std::vector<std::vector<int>> &new_size,\n    std::vector<int> &new_total) {\n    int subset_size = newP.length();\n    int div = P.length() / newP.length();\n\n    for (int comm_ring = 0; comm_ring < newP.length(); ++comm_ring) {\n        int n_bucket = size_per_rank[comm_ring].size();\n        new_size[comm_ring] = std::vector<int>(n_bucket);\n\n        for (int bucket = 0; bucket < n_bucket; ++bucket) {\n            for (int group = 0; group < div; ++group) {\n                int rank = group * subset_size + comm_ring;\n                new_size[comm_ring][bucket] += size_per_rank[rank][bucket];\n            }\n            new_total[comm_ring] += new_size[comm_ring][bucket];\n        }\n    }\n}\n\nvoid 
Layout::set_sizes(Interval &newP,\n                       std::vector<std::vector<int>> &size_per_rank,\n                       int offset) {\n    for (int i = newP.first(); i <= newP.last(); ++i) {\n        set_sizes(i, size_per_rank[i - newP.first() + offset], pointer_[i]);\n    }\n}\n\nvoid Layout::set_sizes(Interval &newP,\n                       std::vector<std::vector<int>> &size_per_rank) {\n    for (int i = newP.first(); i <= newP.last(); ++i) {\n        set_sizes(i, size_per_rank[i - newP.first()], pointer_[i]);\n    }\n}\n\nvoid Layout::set_sizes(int rank, std::vector<int> &sizes, int start) {\n    int pointer = start;\n    auto &b_sizes = bucket_size_[rank];\n\n    for (int i = pointer; i < std::min(sizes.size() + pointer, b_sizes.size());\n         ++i) {\n        b_sizes[i] = sizes[i - pointer];\n    }\n}\n\n// get sizes of all ranges inside range of rank and remember the total_size\nstd::vector<int>\nLayout::sizes_inside_range(Interval2D &range, int rank, int &total_size) {\n    if (rank >= P_) {\n        total_size = 0;\n        return {};\n    }\n\n    std::vector<int> sizes;\n    total_size = 0;\n    int pointer = pointer_[rank];\n    auto &ranges = mapper_->initial_layout(rank);\n    auto &b_size = bucket_size_[rank];\n\n    while (pointer < ranges.size()) {\n        auto &current_range = ranges[pointer];\n\n        if (!range.contains(current_range)) {\n            break;\n        }\n\n        int current_size = b_size[pointer];\n\n        sizes.push_back(current_size);\n        total_size += current_size;\n\n        pointer++;\n    }\n    return sizes;\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/layout.hpp",
    "content": "#pragma once\n\n#include <cosma/interval.hpp>\n#include <cosma/mapper.hpp>\n\n#include <algorithm>\n#include <cassert>\n#include <fstream>\n#include <memory>\n#include <numeric>\n#include <set>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <unordered_map>\n#include <vector>\n\nnamespace cosma {\nclass Layout {\n\n  public:\n    Layout() = default;\n    Layout(Mapper* mapper);\n\n    int size(int rank);\n    int size();\n\n    int offset(int rank, int prev_bucket);\n    int offset(int prev_bucket);\n\n    void update_buckets(Interval &P, Interval2D &range);\n    int seq_bucket(int rank);\n    int seq_bucket();\n    std::vector<int> seq_buckets(Interval &newP);\n    void set_seq_buckets(Interval &newP, std::vector<int> &pointers);\n\n    void buffers_before_expansion(Interval &P,\n                                  Interval2D &range,\n                                  std::vector<std::vector<int>> &size_per_rank,\n                                  std::vector<int> &total_size_per_rank);\n\n    void buffers_after_expansion(Interval &P,\n                                 Interval &newP,\n                                 std::vector<std::vector<int>> &size_per_rank,\n                                 std::vector<int> &total_size_per_rank,\n                                 std::vector<std::vector<int>> &new_size,\n                                 std::vector<int> &new_total);\n\n    std::vector<int>\n    sizes_inside_range(Interval2D &range, int rank, int &total_size);\n\n    void set_sizes(Interval &newP,\n                   std::vector<std::vector<int>> &size_per_rank,\n                   int offset);\n    void set_sizes(Interval &newP,\n                   std::vector<std::vector<int>> &size_per_rank);\n    void set_sizes(int rank, std::vector<int> &sizes, int start);\n\n  protected:\n    char label_;\n\n    /// Number of rows of the global atrix\n    int m_;\n    /// Number of columns of the global matrix\n    int n_;\n    /// 
Maximum number of rank in the global communicator\n    int P_;\n\n    int rank_;\n\n    // rank -> list of submatrices that this rank owns\n    // the number of submatrices that this rank owns\n    // is equal to the number of seq steps in which\n    // this matrix was divided\n    // std::vector<std::vector<Interval2D>> rank_to_range_;\n    // rank -> total initial buffer size\n    std::vector<int> initial_size_;\n    // rank -> buffer size in the current branch of steps\n    std::vector<std::vector<int>> bucket_size_;\n    std::vector<int> pointer_;\n\n    Interval mi_;\n    Interval ni_;\n    Interval Pi_;\n\n    std::vector<int> ranks_reordering;\n    bool ranks_reordered = false;\n\n    Mapper* mapper_;\n\n  private:\n    void next(int rank);\n    void next();\n\n    void prev(int rank);\n    void prev();\n};\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/local_multiply.cpp",
    "content": "#include \"cosma/context.hpp\"\n#include <cosma/local_multiply.hpp>\n#include <cosma/profiler.hpp>\n#include <cosma/timer.hpp>\n\n#ifdef COSMA_HAVE_GPU\n#include <Tiled-MM/tiled_mm.hpp>\n#include <Tiled-MM/util.hpp>\n\n#ifdef COSMA_USE_UNIFIED_MEMORY\n#include <Tiled-MM/gpu_blas_api.hpp>\n#endif\n#endif\n\n#if defined(COSMA_WITH_BLAS) || defined(COSMA_WITH_MKL_BLAS)\n#include <cosma/blas.hpp>\n#endif\n\n#include <chrono>\n#include <complex>\n#include <vector>\n\n#include <mpi.h>\n\nnamespace cosma {\n\nusing clock_t = std::chrono::high_resolution_clock;\nusing ms_t = std::chrono::milliseconds;\n\n#ifdef COSMA_USE_UNIFIED_MEMORY\nusing zfloat = std::complex<float>;\nusing zdouble = std::complex<double>;\n\nint get_first(char trans, int m, int n) { return trans == 'N' ? m : n; }\n\nint get_second(char trans, int m, int n) { return trans == 'N' ? n : m; }\n\ngpu::blas_api::OperationType get_blas_operation(char trans) {\n    gpu::blas_api::OperationType op =\n        trans == 'T'\n            ? gpu::blas_api::operation::Transpose\n            : (trans == 'C' ? 
gpu::blas_api::operation::ConjugateTranspose\n                            : gpu::blas_api::operation::None);\n    return op;\n}\n\ngpu::blas_api::StatusType cublas_gemm_wrapper(gpu::blas_api::HandleType handle,\n                                              char trans_a,\n                                              char trans_b,\n                                              int m,\n                                              int n,\n                                              int k,\n                                              const float *alpha,\n                                              const float *a,\n                                              const float *b,\n                                              const float *beta,\n                                              float *c,\n                                              int lld_c) {\n    gpu::blas_api::OperationType op_a = get_blas_operation(trans_a);\n    gpu::blas_api::OperationType op_b = get_blas_operation(trans_b);\n\n    int ld_a = get_first(trans_a, m, k);\n    int ld_b = get_first(trans_b, k, n);\n\n    return gpu::blas_api::sgemm(\n        handle, op_a, op_b, m, n, k, alpha, a, ld_a, b, ld_b, beta, c, lld_c);\n}\n\ngpu::blas_api::StatusType cublas_gemm_wrapper(gpu::blas_api::HandleType handle,\n                                              char trans_a,\n                                              char trans_b,\n                                              int m,\n                                              int n,\n                                              int k,\n                                              const double *alpha,\n                                              const double *a,\n                                              const double *b,\n                                              const double *beta,\n                                              double *c,\n                                              int lld_c) {\n    gpu::blas_api::OperationType 
op_a = get_blas_operation(trans_a);\n    gpu::blas_api::OperationType op_b = get_blas_operation(trans_b);\n\n    int ld_a = get_first(trans_a, m, k);\n    int ld_b = get_first(trans_b, k, n);\n\n    return gpu::blas_api::dgemm(\n        handle, op_a, op_b, m, n, k, alpha, a, ld_a, b, ld_b, beta, c, lld_c);\n}\n\n// Note: Converting from std::complex to cuComplex and cuDoubleComple\n//       works because they are binary compatible.\n//\n//       http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=902\n//\ngpu::blas_api::StatusType cublas_gemm_wrapper(gpu::blas_api::HandleType handle,\n                                              char trans_a,\n                                              char trans_b,\n                                              int m,\n                                              int n,\n                                              int k,\n                                              const zfloat *alpha,\n                                              const zfloat *a,\n                                              const zfloat *b,\n                                              const zfloat *beta,\n                                              zfloat *c,\n                                              int lld_c) {\n    gpu::blas_api::OperationType op_a = get_blas_operation(trans_a);\n    gpu::blas_api::OperationType op_b = get_blas_operation(trans_b);\n\n    int ld_a = get_first(trans_a, m, k);\n    int ld_b = get_first(trans_b, k, n);\n\n    return gpu::blas_api::cgemm(\n        handle,\n        op_a,\n        op_b,\n        m,\n        n,\n        k,\n        reinterpret_cast<const gpu::blas_api::ComplexFloatType *>(alpha),\n        reinterpret_cast<const gpu::blas_api::ComplexFloatType *>(a),\n        ld_a,\n        reinterpret_cast<const gpu::blas_api::ComplexFloatType *>(b),\n        ld_b,\n        reinterpret_cast<const gpu::blas_api::ComplexFloatType *>(beta),\n        reinterpret_cast<gpu::blas_api::ComplexFloatType *>(c),\n        
lld_c);\n}\n\ngpu::blas_api::StatusType cublas_gemm_wrapper(gpu::blas_api::HandleType handle,\n                                              char trans_a,\n                                              char trans_b,\n                                              int m,\n                                              int n,\n                                              int k,\n                                              const zdouble *alpha,\n                                              const zdouble *a,\n                                              const zdouble *b,\n                                              const zdouble *beta,\n                                              zdouble *c,\n                                              int lld_c) {\n    gpu::blas_api::OperationType op_a = get_blas_operation(trans_a);\n    gpu::blas_api::OperationType op_b = get_blas_operation(trans_b);\n\n    int ld_a = get_first(trans_a, m, k);\n    int ld_b = get_first(trans_b, k, n);\n\n    return gpu::blas_api::zgemm(\n        handle,\n        op_a,\n        op_b,\n        m,\n        n,\n        k,\n        reinterpret_cast<const gpu::blas_api::ComplexDoubleType *>(alpha),\n        reinterpret_cast<const gpu::blas_api::ComplexDoubleType *>(a),\n        ld_a,\n        reinterpret_cast<const gpu::blas_api::ComplexDoubleType *>(b),\n        ld_b,\n        reinterpret_cast<const gpu::blas_api::ComplexDoubleType *>(beta),\n        reinterpret_cast<gpu::blas_api::ComplexDoubleType *>(c),\n        lld_c);\n}\n#endif\n\ntemplate <typename Scalar>\nvoid print_matrix(int m, int n, Scalar *A, char label) {\n    std::cout << \"Matrix \" << label << std::endl;\n    for (int i = 0; i < m; ++i) {\n        for (int j = 0; j < n; ++j) {\n            std::cout << A[j * m + i] << \" \";\n        }\n        std::cout << std::endl;\n    }\n    std::cout << std::endl;\n}\n\ntemplate <typename Scalar>\nclock_t::time_point debug_gemm_start(Scalar *matrixA,\n                                     
Scalar *matrixB,\n                                     Scalar *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     Scalar alpha,\n                                     Scalar beta) {\n    auto start = clock_t::now();\n    if (std::abs(beta) > 0) {\n        std::cout << \"C (before) = \" << std::endl;\n        print_matrix(m, n, matrixC, 'C');\n        auto C_partial = std::unique_ptr<Scalar[]>(new Scalar[m * n]);\n        gemm(m, n, k, alpha, matrixA, m, matrixB, k, 0.0, C_partial.get(), m);\n        std::cout << \"C (partial) = \" << std::endl;\n        print_matrix(m, n, C_partial.get(), 'C');\n    }\n    return start;\n}\n\ntemplate <typename Scalar>\nclock_t::time_point debug_gemm_end(Scalar *matrixA,\n                                   Scalar *matrixB,\n                                   Scalar *matrixC,\n                                   int m,\n                                   int n,\n                                   int k,\n                                   Scalar alpha,\n                                   Scalar beta) {\n    std::cout << \"After multiplication: \" << std::endl;\n    std::cout << \"beta = \" << beta << std::endl;\n    print_matrix(m, k, matrixA, 'A');\n    print_matrix(k, n, matrixB, 'B');\n    print_matrix(m, n, matrixC, 'C');\n\n    return std::chrono::high_resolution_clock::now();\n}\n\n#ifdef COSMA_HAVE_GPU\ntemplate <typename Scalar>\nvoid local_multiply(gpu::mm_handle<Scalar> *gpu_ctx,\n                    Scalar *matrixA,\n                    Scalar *matrixB,\n                    Scalar *matrixC,\n                    int m,\n                    int n,\n                    int k,\n                    Scalar alpha,\n                    Scalar beta,\n                    bool pin_host_buffers,\n                    bool copy_c_back) {\n    /*\n    int rank = 0;\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n  
  if (rank == 0) {\n        // print_matrix(m, k, matrixA, 'A');\n        // print_matrix(k, n, matrixB, 'B');\n        // std::cout << \"m = \" << m << \", n = \" << n << \", k = \" << k <<\n    std::endl;\n    }\n    */\n    int ld_a = m;\n    int ld_b = k;\n    int ld_c = m;\n\n    gpu::gemm(*gpu_ctx,\n              'N',\n              'N',\n              m,\n              n,\n              k,\n              alpha,\n              matrixA,\n              ld_a,\n              matrixB,\n              ld_b,\n              beta,\n              matrixC,\n              ld_c,\n              pin_host_buffers,\n              copy_c_back);\n\n    /*\n    if (rank == 0) {\n        gpu::copy_to_host(gpu_ctx->get_full_device_buffer_c().data(), matrixC, m\n    * n); print_matrix(m, n, matrixC, 'C'); std::cout << \"alpha = \" << alpha <<\n    \", beta = \" << beta << std::endl;\n    }\n    */\n}\n#endif\n\ntemplate <typename Scalar>\nScalar &get_element(Scalar *mat, int m, int n, int i, int j) {\n    return mat[j * m + i];\n}\n\ntemplate <typename Scalar>\nvoid local_multiply_cpu(Scalar *matrixA,\n                        Scalar *matrixB,\n                        Scalar *matrixC,\n                        int m,\n                        int n,\n                        int k,\n                        Scalar alpha,\n                        Scalar beta) {\n    for (int mi = 0; mi < m; ++mi) {\n        for (int ni = 0; ni < n; ++ni) {\n            Scalar &Cvalue = get_element(matrixC, m, n, mi, ni);\n            Cvalue *= beta;\n            for (int ki = 0; ki < k; ++ki) {\n                Scalar &Avalue = get_element(matrixA, m, k, mi, ki);\n                Scalar &Bvalue = get_element(matrixB, k, n, ki, ni);\n                Cvalue += alpha * Avalue * Bvalue;\n            }\n        }\n    }\n}\n\ntemplate <typename Scalar>\nvoid local_multiply(cosma_context<Scalar> *ctx,\n                    Scalar *matrixA,\n                    Scalar *matrixB,\n                    Scalar 
*matrixC,\n                    int m,\n                    int n,\n                    int k,\n                    Scalar alpha,\n                    Scalar beta,\n                    bool copy_c_back) {\n#ifdef DEBUG\n    auto t_start =\n        debug_gemm_start(matrixA, matrixB, matrixC, m, n, k, alpha, beta);\n#endif\n\n#ifdef COSMA_HAVE_GPU\n#ifdef COSMA_USE_UNIFIED_MEMORY\n    if (ctx.unified_memory()) {\n        PE(multiply_computation_gemm);\n        auto status = cublas_gemm_wrapper(\n            ctx->get_gpu_context()->get_gpu_context().get_blas_handle(0),\n            'N',\n            'N',\n            m,\n            n,\n            k,\n            &alpha,\n            matrixA,\n            matrixB,\n            &beta,\n            matrixC,\n            m);\n\n        gpu::check_blas_status(status);\n        // we need explicit synchronization over the stream to trigger the copy\n        // back to CPU memory\n        hipStreamSynchronize(\n            ctx->get_gpu_context()->get_gpu_context().get_stream(0));\n        PL();\n    } else {\n#endif // COSMA_USE_UNIFIED_MEMORY\n        PE(multiply_computation_pinning);\n        if (ctx->pin_host_buffers) {\n            ctx->get_memory_pool().pin(matrixA, m * k);\n            ctx->get_memory_pool().pin(matrixB, k * n);\n            // if (copy_c_back || std::abs(beta) > 0) {\n            ctx->get_memory_pool().pin(matrixC, m * n);\n            // }\n        }\n        PL();\n\n        PE(multiply_computation_gemm);\n        local_multiply(ctx->get_gpu_context(),\n                       matrixA,\n                       matrixB,\n                       matrixC,\n                       m,\n                       n,\n                       k,\n                       alpha,\n                       beta,\n                       false,\n                       copy_c_back);\n        PL();\n#ifdef COSMA_USE_UNIFIED_MEMORY\n    }\n#endif\n\n#else\n    PE(multiply_computation_gemm);\n    gemm(m, n, k, alpha, matrixA, 
m, matrixB, k, beta, matrixC, m);\n    PL();\n#endif\n\n#ifdef DEBUG\n    auto t_end =\n        debug_gemm_end(matrixA, matrixB, matrixC, m, n, k, alpha, beta);\n    std::cout << \"time(\" << m << \", \" << n << \", \" << k << \") = \"\n              << std::chrono::duration_cast<ms_t>(t_end - t_start).count()\n              << std::endl;\n#endif\n}\n\ntemplate <typename Scalar>\nvoid local_multiply(Scalar *matrixA,\n                    Scalar *matrixB,\n                    Scalar *matrixC,\n                    int m,\n                    int n,\n                    int k,\n                    Scalar alpha,\n                    Scalar beta,\n                    bool copy_c_back) {\n    local_multiply(get_context_instance<Scalar>(),\n                   matrixA,\n                   matrixB,\n                   matrixC,\n                   m,\n                   n,\n                   k,\n                   alpha,\n                   beta,\n                   copy_c_back);\n}\n\ntemplate <typename Scalar>\nvoid local_multiply(context<Scalar> &ctx,\n                    Scalar *matrixA,\n                    Scalar *matrixB,\n                    Scalar *matrixC,\n                    int m,\n                    int n,\n                    int k,\n                    Scalar alpha,\n                    Scalar beta,\n                    bool copy_c_back) {\n    local_multiply(ctx.get(),\n                   matrixA,\n                   matrixB,\n                   matrixC,\n                   m,\n                   n,\n                   k,\n                   alpha,\n                   beta,\n                   copy_c_back);\n}\n\n// explicit template instantiation using context\ntemplate void local_multiply<double>(cosma_context<double> *ctx,\n                                     double *matrixA,\n                                     double *matrixB,\n                                     double *matrixC,\n                                     int m,\n                         
            int n,\n                                     int k,\n                                     double alpha,\n                                     double beta,\n                                     bool copy_c_back);\n\ntemplate void local_multiply<float>(cosma_context<float> *ctx,\n                                    float *matrixA,\n                                    float *matrixB,\n                                    float *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    float alpha,\n                                    float beta,\n                                    bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<double>>(cosma_context<std::complex<double>> *ctx,\n                                     std::complex<double> *matrixA,\n                                     std::complex<double> *matrixB,\n                                     std::complex<double> *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     std::complex<double> alpha,\n                                     std::complex<double> beta,\n                                     bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<float>>(cosma_context<std::complex<float>> *ctx,\n                                    std::complex<float> *matrixA,\n                                    std::complex<float> *matrixB,\n                                    std::complex<float> *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    std::complex<float> alpha,\n                                    std::complex<float> beta,\n                                    bool copy_c_back);\n\n// explicit template 
instantiation using context - no pinning\ntemplate void local_multiply_cpu<double>(double *matrixA,\n                                         double *matrixB,\n                                         double *matrixC,\n                                         int m,\n                                         int n,\n                                         int k,\n                                         double alpha,\n                                         double beta);\n\ntemplate void local_multiply_cpu<float>(float *matrixA,\n                                        float *matrixB,\n                                        float *matrixC,\n                                        int m,\n                                        int n,\n                                        int k,\n                                        float alpha,\n                                        float beta);\n\ntemplate void\nlocal_multiply_cpu<std::complex<double>>(std::complex<double> *matrixA,\n                                         std::complex<double> *matrixB,\n                                         std::complex<double> *matrixC,\n                                         int m,\n                                         int n,\n                                         int k,\n                                         std::complex<double> alpha,\n                                         std::complex<double> beta);\n\ntemplate void\nlocal_multiply_cpu<std::complex<float>>(std::complex<float> *matrixA,\n                                        std::complex<float> *matrixB,\n                                        std::complex<float> *matrixC,\n                                        int m,\n                                        int n,\n                                        int k,\n                                        std::complex<float> alpha,\n                                        std::complex<float> beta);\n\n// explicit template instantiation using context with 
unique_ptr context\ntemplate void local_multiply<double>(context<double> &ctx,\n                                     double *matrixA,\n                                     double *matrixB,\n                                     double *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     double alpha,\n                                     double beta,\n                                     bool copy_c_back);\n\ntemplate void local_multiply<float>(context<float> &ctx,\n                                    float *matrixA,\n                                    float *matrixB,\n                                    float *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    float alpha,\n                                    float beta,\n                                    bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<double>>(context<std::complex<double>> &ctx,\n                                     std::complex<double> *matrixA,\n                                     std::complex<double> *matrixB,\n                                     std::complex<double> *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     std::complex<double> alpha,\n                                     std::complex<double> beta,\n                                     bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<float>>(context<std::complex<float>> &ctx,\n                                    std::complex<float> *matrixA,\n                                    std::complex<float> *matrixB,\n                                    std::complex<float> *matrixC,\n                                    int 
m,\n                                    int n,\n                                    int k,\n                                    std::complex<float> alpha,\n                                    std::complex<float> beta,\n                                    bool copy_c_back);\n\n// explicit instantiation without context\ntemplate void local_multiply<double>(double *matrixA,\n                                     double *matrixB,\n                                     double *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     double alpha,\n                                     double beta,\n                                     bool copy_c_back);\n\ntemplate void local_multiply<float>(float *matrixA,\n                                    float *matrixB,\n                                    float *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    float alpha,\n                                    float beta,\n                                    bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<double>>(std::complex<double> *matrixA,\n                                     std::complex<double> *matrixB,\n                                     std::complex<double> *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     std::complex<double> alpha,\n                                     std::complex<double> beta,\n                                     bool copy_c_back);\n\ntemplate void local_multiply<std::complex<float>>(std::complex<float> *matrixA,\n                                                  std::complex<float> *matrixB,\n                                                  
std::complex<float> *matrixC,\n                                                  int m,\n                                                  int n,\n                                                  int k,\n                                                  std::complex<float> alpha,\n                                                  std::complex<float> beta,\n                                                  bool copy_c_back);\n\n#ifdef COSMA_HAVE_GPU\n// explicit template instantiation using gpu context\ntemplate void local_multiply<double>(gpu::mm_handle<double> *ctx,\n                                     double *matrixA,\n                                     double *matrixB,\n                                     double *matrixC,\n                                     int m,\n                                     int n,\n                                     int k,\n                                     double alpha,\n                                     double beta,\n                                     bool pin_host_buffers,\n                                     bool copy_c_back);\n\ntemplate void local_multiply<float>(gpu::mm_handle<float> *ctx,\n                                    float *matrixA,\n                                    float *matrixB,\n                                    float *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    float alpha,\n                                    float beta,\n                                    bool pin_host_buffers,\n                                    bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<double>>(gpu::mm_handle<std::complex<double>> *ctx,\n                                     std::complex<double> *matrixA,\n                                     std::complex<double> *matrixB,\n                                     std::complex<double> *matrixC,\n                         
            int m,\n                                     int n,\n                                     int k,\n                                     std::complex<double> alpha,\n                                     std::complex<double> beta,\n                                     bool pin_host_buffers,\n                                     bool copy_c_back);\n\ntemplate void\nlocal_multiply<std::complex<float>>(gpu::mm_handle<std::complex<float>> *ctx,\n                                    std::complex<float> *matrixA,\n                                    std::complex<float> *matrixB,\n                                    std::complex<float> *matrixC,\n                                    int m,\n                                    int n,\n                                    int k,\n                                    std::complex<float> alpha,\n                                    std::complex<float> beta,\n                                    bool pin_host_buffers,\n                                    bool copy_c_back);\n#endif\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/local_multiply.hpp",
    "content": "#pragma once\n#include <cosma/context.hpp>\n\nnamespace cosma {\n\ntemplate <typename Scalar>\nvoid local_multiply(cosma_context<Scalar> *ctx,\n                    Scalar *a,\n                    Scalar *b,\n                    Scalar *c,\n                    int m,\n                    int n,\n                    int k,\n                    Scalar alpha,\n                    Scalar beta,\n                    bool copy_c_back);\n\ntemplate <typename Scalar>\nvoid local_multiply_cpu(Scalar *a,\n                        Scalar *b,\n                        Scalar *c,\n                        int m,\n                        int n,\n                        int k,\n                        Scalar alpha,\n                        Scalar beta);\n\ntemplate <typename scalar>\nvoid local_multiply(context<scalar> &ctx,\n                    scalar *a,\n                    scalar *b,\n                    scalar *c,\n                    int m,\n                    int n,\n                    int k,\n                    scalar alpha,\n                    scalar beta,\n                    bool copy_c_back);\n\ntemplate <typename scalar>\nvoid local_multiply(scalar *a,\n                    scalar *b,\n                    scalar *c,\n                    int m,\n                    int n,\n                    int k,\n                    scalar alpha,\n                    scalar beta,\n                    bool copy_c_back);\n\n#ifdef COSMA_HAVE_GPU\ntemplate <typename scalar>\nvoid local_multiply(gpu::mm_handle<scalar> *gpu_ctx,\n                    scalar *a,\n                    scalar *b,\n                    scalar *c,\n                    int m,\n                    int n,\n                    int k,\n                    scalar alpha,\n                    scalar beta,\n                    bool pin_host_buffers,\n                    bool copy_c_back);\n#endif\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/mapper.cpp",
    "content": "#include <cosma/mapper.hpp>\n#include <cosma/profiler.hpp>\n\nnamespace cosma {\nMapper::Mapper(char label,\n               const Strategy& strategy,\n               int rank)\n    : label_(label)\n    , strategy_(&strategy)\n    , m_(strategy.n_rows(label))\n    , n_(strategy.n_cols(label))\n    , P_(strategy.P)\n    , rank_(rank) {\n    PE(preprocessing_matrices_mapper_sizes);\n    skip_ranges_ = std::vector<int>(P_);\n    rank_to_range_ =\n        std::vector<std::vector<Interval2D>>(P_, std::vector<Interval2D>());\n    mi_ = Interval(0, m_ - 1);\n    ni_ = Interval(0, n_ - 1);\n    Pi_ = Interval(0, P_ - 1);\n    compute_sizes(mi_, ni_, Pi_, 0, strategy);\n    initial_buffer_size_ = std::vector<size_t>(P_);\n    range_offset_ = std::vector<std::vector<std::size_t>>(P_, std::vector<std::size_t>());\n\n    for (size_t rank = 0; rank < P_; ++rank) {\n        size_t size = 0;\n        int matrix_id = 0;\n        for (auto &matrix : rank_to_range_[rank]) {\n            range_offset_[rank].push_back(size);\n            size += matrix.size();\n            matrix_id++;\n        }\n        range_offset_[rank].push_back(size);\n        initial_buffer_size_[rank] = size;\n        if (rank_to_range_[rank].size() == 0) {\n            std::cout << \"RANK \" << rank << \" DOES NOT OWN ANYTHING\"\n                      << std::endl;\n        }\n    }\n    PL();\n\n    PE(preprocessing_matrices_mapper_coordinates);\n    // both partitions start with 0\n    row_partition_set_ = std::set<int>{-1};\n    col_partition_set_ = std::set<int>{-1};\n    compute_range_to_rank();\n    row_partition_ =\n        std::vector<int>(row_partition_set_.begin(), row_partition_set_.end());\n    col_partition_ =\n        std::vector<int>(col_partition_set_.begin(), col_partition_set_.end());\n\n    // compute_global_coord();\n#ifdef DEBUG\n    output_layout();\n#endif\n    PL();\n    // if (rank_ >= P_) {\n    //     return;\n    // }\n}\n\nvoid Mapper::output_layout() {\n    
std::cout << \"MATRIX \" << label_ << \" LAYOUT: \" << std::endl;\n    for (int i = 0; i < m_; ++i) {\n        for (int j = 0; j < n_; ++j) {\n            std::cout << local_coordinates(i, j).second << \" \";\n        }\n        std::cout << \"\\n\";\n    }\n    std::cout << \"\\n\";\n\n    std::cout << \"Row partitions:\\n\";\n    for (auto i = 0u; i < row_partition_.size(); i++) {\n        std::cout << row_partition_[i] << \" \";\n    }\n    std::cout << std::endl << std::endl;\n\n    std::cout << \"Column partitions:\\n\";\n    for (auto i = 0u; i < col_partition_.size(); i++) {\n        std::cout << col_partition_[i] << \" \";\n    }\n    std::cout << std::endl << std::endl;\n    ;\n    /*\n    std::cout << \"Range to rank:\\n\";\n    for (auto& pair : range_to_rank_) {\n        std::cout << \"Range \" << pair.first << \" is owned by rank \" <<\n    pair.second.first << \" starting at local index \" << pair.second.second <<\n    std::endl;\n    }\n    std::cout << \"\\n\\n\";\n    */\n\n    std::cout << \"Rank to range:\\n\";\n    for (auto i = 0u; i < P_; ++i) {\n        std::cout << \"Rank \" << i << \" owns:\" << std::endl;\n        for (auto &range : rank_to_range_[i]) {\n            std::cout << range << std::endl;\n        }\n        std::cout << \"\\n\\n\";\n    }\n    std::cout << \"\\n\\n\";\n}\n\n// finds the initial data layout\nvoid Mapper::compute_sizes(Interval m,\n                           Interval n,\n                           Interval P,\n                           int step,\n                           const Strategy &strategy) {\n    Interval2D submatrix(m, n);\n\n    // base case\n    if (strategy.final_step(step) || strategy.empty()) {\n        auto submatrices = rank_to_range_[P.first()];\n        rank_to_range_[P.first()].push_back(submatrix);\n        return;\n    }\n\n    int divm = strategy.divisor_row(label_, step);\n    int divn = strategy.divisor_col(label_, step);\n    int div = strategy.divisor(step);\n\n    // remember the 
previous number of fixed subranges\n    // for each rank. this is only used in sequential step\n    // we want the next sequential step to NOT modify the\n    // subranges from the previous sequential step\n    std::vector<int> prev_skip_ranges;\n    if (strategy.sequential_step(step)) {\n        for (int i = P.first(); i <= P.last(); ++i) {\n            prev_skip_ranges.push_back(skip_ranges_[i]);\n        }\n    }\n\n    for (int i = 0; i < div; ++i) {\n        Interval newP = P.subinterval(div, i);\n        // intervals of M, N and K that the current processor subinterval is\n        // taking care of\n        Interval newm = m.subinterval(divm, divm > 1 ? i : 0);\n        Interval newn = n.subinterval(divn, divn > 1 ? i : 0);\n\n        if (strategy.sequential_step(step)) {\n            // invoke the substeps\n            compute_sizes(newm, newn, P, step + 1, strategy);\n            // skip these elements in rank_to_range_ to make the next sequential\n            // step independent we assume that this many subranges are fixed in\n            // this sequential step and we don't want that next sequential step\n            // pop up some of the subranges stored in this sequential step (for\n            // example if sequential step is followed by copy case)\n            for (int rank = P.first(); rank <= P.last(); ++rank) {\n                skip_ranges_[rank] = rank_to_range_[rank].size();\n            }\n            // don't go in other branches if dividing over absent dimension\n            // it is still necessary to run the substep at least once\n            // because rank_to_range_ fills up only at the end of the substep\n            // and is being modified on the way back\n            if (divm * divn == 1) {\n                break;\n            }\n        } else {\n            // no-copy case\n            // here each substep will fill up different part of the sizes vector\n            if (divm * divn > 1) {\n                compute_sizes(newm, newn, 
newP, step + 1, strategy);\n            }\n            // copy case\n            else {\n                compute_sizes(m, n, newP, step + 1, strategy);\n\n                for (int shift = 0; shift < newP.length(); ++shift) {\n                    int rank = newP.first() + shift;\n\n                    // go through all the submatrices this rank owns\n                    auto &submatrices = rank_to_range_[rank];\n                    for (int mat = skip_ranges_[rank]; mat < submatrices.size();\n                         mat++) {\n                        auto &matrix = submatrices[mat];\n\n                        // and split it equally among all the ranks that\n                        // this rank is communicating to in this round\n                        for (int partition = 1; partition < div; ++partition) {\n                            int target = partition * newP.length() + rank;\n                            auto &vec = rank_to_range_[target];\n                            vec.push_back(matrix.submatrix(div, partition));\n                        }\n                        matrix = matrix.submatrix(div, 0);\n                    }\n                }\n                // invoke just one branch of substeps since others are the same\n                // (in the copy case)\n                break;\n            }\n        }\n    }\n\n    // if copy case is followed by a sequential step then it is necessary to not\n    // permanently skip the subranges after all sequential steps since maybe the\n    // first copy case wants to modify all the elements from the beginning of\n    // the sequential step (to subdivide all the matrices as above)\n    if (strategy.sequential_step(step)) {\n        // clean after yourself, once all sequential steps have finished\n        for (int i = P.first(); i <= P.last(); ++i) {\n            skip_ranges_[i] = prev_skip_ranges[i - P.first()];\n        }\n    }\n}\n\nsize_t Mapper::initial_size(int rank) const {\n    // check if reorered\n    // if 
(ranks_reordered) {\n    //     rank = ranks_reordering[rank];\n    // }\n    if (rank < P_)\n        return initial_buffer_size_[rank];\n    return 0;\n}\n\nsize_t Mapper::initial_size() const { return initial_size(rank_); }\n\nstd::vector<size_t> Mapper::all_initial_sizes() const {\n    return initial_buffer_size_;\n}\n\nconst std::vector<Interval2D> &Mapper::initial_layout(int rank) const {\n    // check if reorered\n    // if (ranks_reordered) {\n    //     rank = ranks_reordering[rank];\n    // }\n    return rank_to_range_[rank];\n}\n\nconst std::vector<Interval2D> &Mapper::initial_layout() const {\n    return initial_layout(rank_);\n}\n\nstd::vector<std::vector<Interval2D>> &Mapper::complete_layout() {\n    return rank_to_range_;\n}\n\n// computes the inverse of rank_to_range_ by iterating through it\nvoid Mapper::compute_range_to_rank() {\n    for (auto rank = 0u; rank < P_; ++rank) {\n        int matrix_id = 0;\n        for (auto matrix : rank_to_range_[rank]) {\n            range_to_rank_.insert({matrix, {rank, range_offset_[rank][matrix_id]}});\n            row_partition_set_.insert(matrix.rows.last());\n            col_partition_set_.insert(matrix.cols.last());\n            ++matrix_id;\n        }\n    }\n}\n\n// (gi, gj) -> (local_id, rank)\nstd::pair<int, int> Mapper::local_coordinates(int gi, int gj) {\n    Interval row_interval;\n    Interval col_interval;\n\n    // TODO: use segment tree to locate the interval which contains (gi, gj)\n    for (auto row_int = 1u; row_int < row_partition_.size(); ++row_int) {\n        if (row_partition_[row_int] >= gi && row_partition_[row_int - 1] < gi) {\n            row_interval = Interval(row_partition_[row_int - 1] + 1,\n                                    row_partition_[row_int]);\n            break;\n        }\n    }\n\n    for (auto col_int = 1u; col_int < col_partition_.size(); ++col_int) {\n        if (col_partition_[col_int] >= gj && col_partition_[col_int - 1] < gj) {\n            col_interval = 
Interval(col_partition_[col_int - 1] + 1,\n                                    col_partition_[col_int]);\n            break;\n        }\n    }\n    // range containing gi, gj\n    Interval2D range(row_interval, col_interval);\n\n    if (!range.contains(gi, gj)) {\n        std::cout << \"Error in local_coordinates(\" << gi << \", \" << gj\n                  << \") does not belong to the range \" << range << std::endl;\n    }\n\n    int rank;\n    int offset;\n    int local_index;\n\n    std::tie(rank, offset) = range_to_rank_[range];\n    // if (ranks_reordered) {\n    //     rank = ranks_reordering[rank];\n    // }\n    local_index = offset + range.local_index(gi, gj);\n\n    return {local_index, rank};\n}\n\nvoid Mapper::compute_global_coord() {\n    int index = 0;\n    global_coord = std::vector<std::pair<int, int>>(initial_size());\n    for (auto matrix_id = 0u; matrix_id < rank_to_range_[rank_].size();\n         ++matrix_id) {\n        Interval2D range = rank_to_range_[rank_][matrix_id];\n        for (auto local = 0; local < range.size(); ++local, ++index) {\n            global_coord[index] = range.global_index(local);\n        }\n    }\n}\n\n// local_id -> (gi, gj) (only for the current rank)\nstd::pair<int, int> Mapper::global_coordinates(int local_index) {\n    if (local_index >= initial_size()) {\n        return {-1, -1};\n    }\n    if (global_coord.size() == 0) {\n        compute_global_coord();\n    }\n    return global_coord[local_index];\n}\n\n// (local_id, rank) -> (gi, gj)\nstd::pair<int, int> Mapper::global_coordinates(int local_index, int rank) {\n    // if (ranks_reordered) {\n    //     rank = ranks_reordering[rank];\n    // }\n    // TODO: use segment tree to locate with matrix of all the matrices\n    // owned by rank contain the local_index\n    for (auto matrix_id = 0u; matrix_id < rank_to_range_[rank].size();\n         ++matrix_id) {\n        // range_offset_ returns the beginning index of matrix_id range\n        // if the beginning of the 
matrix >= local_index then this range\n        // contains local_index\n        if (range_offset_[rank][matrix_id + 1] > local_index) {\n            Interval2D range = rank_to_range_[rank][matrix_id];\n            local_index -= range_offset_[rank][matrix_id];\n\n            int x, y;\n            std::tie(x, y) = range.global_index(local_index);\n            // std::cout << \"Rank \" << rank << \", local_index = \" << local_index\n            // << \" -> (\" <<  x << \", \" << y << \")\" << std::endl;\n            return {x, y};\n        }\n    }\n    return {-1, -1};\n}\n\nchar Mapper::which_matrix() {\n    return label_;\n}\n\nstd::vector<std::size_t>& Mapper::local_blocks_offsets() {\n    return range_offset_[rank_];\n}\n\nstd::vector<Interval2D> Mapper::local_blocks() {\n    if (rank_ < strategy_->P)\n        return rank_to_range_[rank_];\n    return {};\n}\n\nint Mapper::owner(Interval2D& block) {\n    auto rank_and_offset_iterator = range_to_rank_.find(block);\n    if (rank_and_offset_iterator == range_to_rank_.end()) {\n        throw std::runtime_error(\"ERROR in mapper.cpp: the owner cannot be determined, the block not found.\");\n    }\n    assert(rank_and_offset_iterator != range_to_rank_.end());\n    auto rank_and_offset = rank_and_offset_iterator->second;\n    auto rank = rank_and_offset.first;\n    return rank;\n}\n\ncosta::assigned_grid2D Mapper::get_layout_grid() {\n    // **************************\n    // create grid2D\n    // **************************\n    // prepare row intervals\n    // and col intervals\n    std::vector<int> rows_split;\n    rows_split.reserve(row_partition_.size());\n    for (const auto& tick : row_partition_) {\n        rows_split.push_back(tick + 1);\n    }\n    std::vector<int> cols_split;\n    cols_split.reserve(col_partition_.size());\n    for (const auto& tick : col_partition_) {\n        cols_split.push_back(tick + 1);\n    }\n\n    costa::grid2D grid(std::move(rows_split), std::move(cols_split));\n\n    int 
n_blocks_row = grid.n_rows;\n    int n_blocks_col = grid.n_cols;\n\n    // **************************\n    // create an assigned grid2D\n    // **************************\n    // create a matrix of ranks owning each block\n    std::vector<std::vector<int>> owners(n_blocks_row,\n                                         std::vector<int>(n_blocks_col));\n    for (int i = 0; i < n_blocks_row; ++i) {\n        auto r_inter = grid.row_interval(i);\n        Interval row_interval(r_inter.start, r_inter.end - 1);\n        for (int j = 0; j < n_blocks_col; ++j) {\n            auto c_inter = grid.col_interval(j);\n            Interval col_interval(c_inter.start, c_inter.end - 1);\n\n            Interval2D range(row_interval, col_interval);\n            owners[i][j] = owner(range);\n        }\n    }\n\n    // create an assigned grid2D\n    costa::assigned_grid2D assigned_grid(\n        std::move(grid), std::move(owners), P_);\n\n    return assigned_grid;\n}\n\nint Mapper::m() const {\n    return m_;\n}\n\nint Mapper::n() const {\n    return n_;\n}\n\nint Mapper::P() const {\n    return P_;\n}\n\nint Mapper::rank() const {\n    return rank_;\n}\n\nchar Mapper::label() const {\n    return label_;\n}\n\nconst Strategy& Mapper::strategy() const {\n    return *strategy_;\n}\n\nvoid Mapper::reorder_rank(int new_rank) {\n    rank_ = new_rank;\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/mapper.hpp",
    "content": "#pragma once\n\n#include <cosma/interval.hpp>\n#include <cosma/strategy.hpp>\n\n#include <costa/grid2grid/transform.hpp>\n\n#include <algorithm>\n#include <cassert>\n#include <fstream>\n#include <memory>\n#include <numeric>\n#include <set>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <unordered_map>\n#include <vector>\n\nnamespace cosma {\nclass Mapper {\n  public:\n    Mapper() = default;\n    Mapper(char label,\n           const Strategy& strategy,\n           int rank);\n\n    size_t initial_size(int rank) const;\n\n    size_t initial_size() const;\n\n    std::vector<size_t> all_initial_sizes() const;\n\n    // rank -> list of ranges it owns initially\n    const std::vector<Interval2D> &initial_layout(int rank) const;\n    const std::vector<Interval2D> &initial_layout() const;\n    std::vector<std::vector<Interval2D>> &complete_layout();\n\n    // (gi, gj) -> (local_id, rank)\n    std::pair<int, int> local_coordinates(int gi, int gj);\n\n    // (local_id, rank) -> (gi, gj)\n    std::pair<int, int> global_coordinates(int local_index, int rank);\n\n    // local_id -> (gi, gj) (for local elements on the current rank)\n    std::pair<int, int> global_coordinates(int local_index);\n\n    // returns the label of the matrix (A, B or C)\n    char which_matrix();\n\n    // get a vector of offsets of each local block\n    std::vector<std::size_t>& local_blocks_offsets();\n\n    // get a vector of local blocks\n    std::vector<Interval2D> local_blocks();\n\n    // returns a rank owning given block\n    int owner(Interval2D& block);\n\n    costa::assigned_grid2D get_layout_grid();\n\n    int m() const;\n    int n() const;\n    int P() const;\n    int rank() const;\n    char label() const;\n    const Strategy& strategy() const;\n\n    // changes the current rank to new_rank\n    // this is used when we want to reorder ranks\n    // in order to minimize the communication volume\n    // if matrices are initially given in a different\n    
// data layout\n    void reorder_rank(int new_rank);\n\n  protected:\n    // A, B or C\n    char label_;\n    /// Number of rows of the global atrix\n    int m_;\n    /// Number of columns of the global matrix\n    int n_;\n    /// Maximum number of rank in the global communicator\n    size_t P_;\n    int rank_;\n    const Strategy* strategy_;\n\n    // rank -> list of submatrices that this rank owns\n    // the number of submatrices that this rank owns\n    // is equal to the number of sequential steps in which\n    // this matrix was divided\n    std::vector<std::vector<Interval2D>> rank_to_range_;\n    std::unordered_map<Interval2D, std::pair<int, std::size_t>> range_to_rank_;\n\n    // rank -> total initial buffer size\n    std::vector<size_t> initial_buffer_size_;\n\n    // rank -> vector of sizes of all the ranges that this rank owns\n    std::vector<std::vector<std::size_t>> range_offset_;\n\n    Interval mi_;\n    Interval ni_;\n    Interval Pi_;\n\n  private:\n    // used by sequential steps.\n    // rank -> number of submatrices fixed by the previous sequential step\n    std::vector<int> skip_ranges_;\n\n    std::set<int> row_partition_set_;\n    std::set<int> col_partition_set_;\n    std::vector<int> row_partition_;\n    std::vector<int> col_partition_;\n\n    std::vector<std::pair<int, int>> global_coord;\n\n    void compute_sizes(Interval m,\n                       Interval n,\n                       Interval P,\n                       int step,\n                       const Strategy &strategy);\n    void output_layout();\n    void compute_range_to_rank();\n\n    void compute_global_coord();\n};\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/math_utils.cpp",
    "content": "#include <cosma/math_utils.hpp>\n\nnamespace cosma {\nint math_utils::gcd(int a, int b) { return b == 0 ? a : gcd(b, a % b); }\n\nlong long math_utils::divide_and_round_up(long long x, long long y) {\n    return 1 + ((x - 1) / y);\n}\n\nint math_utils::next_multiple_of(int n_to_round, int multiple) {\n    if (multiple == 0)\n        return n_to_round;\n\n    int remainder = n_to_round % multiple;\n    if (remainder == 0)\n        return n_to_round;\n\n    return n_to_round + multiple - remainder;\n}\n\n// find all divisors of a given number n\nstd::vector<int> math_utils::find_divisors(int n) {\n    std::vector<int> divs;\n    for (int i = 1; i <= n; ++i) {\n        if (n % i == 0) {\n            divs.push_back(i);\n        }\n    }\n    return divs;\n}\n\n// Finds the divisors dm, dn and dk for m, n and k respectively, such that\n// 1. dm * dn * dk <= P\n// 2. dm <= min(m, n, m/local_problem_size)\n// 3. dn <= min(n, k, n/local_problem_size)\n// 5. dk <= min(k, n, k/local_problem_size)\n// 6. balanced: m/dm approx= n/dn approx= k/dk\n//\n// For the upper bound on divisors, the following conditions are taken into account:\n//     - layout-conditions: the matrix that is not split, i.e. 
which does not\n//                          contain the split dimension, must have #columns\n//                          at least as large as the divisor of that dimension\n//     - min-problem-size: the minimum size of the corresponding dimension\n//                         after splitting should be at least min_problem_size\n//     - mathematical: divisor or some dimension should be at least 1 (i.e.\n// \nstd::tuple<int, int, int>\nmath_utils::balanced_divisors(long long m, long long n, long long k,\n                              int P, int min_local_problem_size) {\n    // each divisor can be at most the value of the dimension\n    auto max_divm = std::min(\n                            // layout condition + mathematical\n                            // the matrix that is not split here (i.e. B)\n                            // must have #colums >= divm\n                            std::min(m, n), \n                            // min_problem_size condition\n                            m/min_local_problem_size); // min_prob_size condition\n    max_divm = std::max(1LL, max_divm);\n    auto max_divn = std::min(std::min(k, n),\n                             n/min_local_problem_size);\n    max_divn = std::max(1LL, max_divn);\n    auto max_divk = std::min(std::min(k, n),\n                             k/min_local_problem_size);\n    max_divk = std::max(1LL, max_divk);\n\n    // protect from overflow by adding redundant checks\n    if (max_divm < P && max_divn < P && max_divk < P\n            && max_divm * max_divn < P\n            && max_divm * max_divn * max_divk < P) {\n        P = (int) (max_divm * max_divn * max_divk);\n    }\n\n    // sort the dimensions\n    std::vector<int> dims = {(int)m, (int)n, (int)k};\n    std::sort(dims.begin(), dims.end());\n\n    double target_tile_size = 0.0;\n    // avoid overflow\n    if (dims[2] >= P) {\n        target_tile_size = std::cbrt(1.0 * dims[2]/ P * dims[0] * dims[1]);\n    } else if (dims[1] * dims[2] >= P) {\n        
target_tile_size = std::cbrt(1.0 * dims[1] * dims[2] / P * dims[0]);\n    } else {\n        target_tile_size = std::cbrt(1.0 * dims[0] * dims[1] * dims[2] / P);\n    }\n\n    int error = std::numeric_limits<int>::max();\n    int divm = 1;\n    int divn = 1;\n    int divk = 1;\n\n    for (const int &div1 : find_divisors(P)) {\n        if (div1 > max_divm) break;\n\n        int error_lower_bound = std::abs(m / div1 - target_tile_size);\n        if (error_lower_bound > error) {\n            continue;\n        }\n        for (const int &div2 : find_divisors(P / div1)) {\n            if (div2 > max_divn) break;\n            int div3 = std::min((P / div1) / div2, (int) max_divk);\n            int current_error = std::abs(m / div1 - target_tile_size) +\n                                std::abs(n / div2 - target_tile_size) +\n                                std::abs(k / div3 - target_tile_size);\n            // prefer new divisors if they make tile size closer to the target size\n            // or if they utilize more processors\n            if (div1 * div2 * div3 > divm * divn * divk ||\n                div1 * div2 * div3 == divm * divn * divk && current_error < error) {\n                divm = div1;\n                divn = div2;\n                divk = div3;\n\n                error = current_error;\n            }\n        }\n    }\n    return std::make_tuple(divm, divn, divk);\n}\n\n// find all prime factors of a given number n\nstd::vector<int> math_utils::decompose(int n) {\n    std::vector<int> factors;\n    int orig_n = n;\n\n    // number of 2s that divide n\n    while (n % 2 == 0) {\n        factors.push_back(2);\n        n = n / 2;\n    }\n\n    // n must be odd at this point.\n    // we can skip one element\n    for (int i = 3; i <= std::sqrt(n); i = i + 2) {\n        // while i divides n, print i and divide n\n        while (n % i == 0) {\n            factors.push_back(i);\n            n = n / i;\n        }\n    }\n\n    // This condition is to handle the case 
when n\n    // is a prime number greater than 2\n    if (n > 2) {\n        factors.push_back(n);\n    }\n\n    // std::cout << \"factors of \" << orig_n << \" are: \";\n    // for (const auto& el : factors)\n    //     std::cout << el << \", \";\n    // std::cout << std::endl;\n    return factors;\n}\n\nint math_utils::closest_divisor(int P, int dimension, double target) {\n    int divisor = 1;\n    int error;\n    int best_error = std::numeric_limits<int>::max();\n    int best_div = 1;\n\n    for (int i : find_divisors(P)) {\n        error = std::abs(1.0 * dimension / i - target);\n\n        if (error <= best_error) {\n            best_div = i;\n            best_error = error;\n        }\n    }\n\n    return best_div;\n}\n\nint math_utils::int_div_up(int numerator, int denominator) {\n    return numerator / denominator +\n           (((numerator < 0) ^ (denominator > 0)) && (numerator % denominator));\n}\n\ndouble math_utils::square_score(int rows, int cols) {\n    if (rows == 0 || cols == 0) {\n        std::runtime_error(\"square_score function called with zero-dimension.\");\n    }\n    double ratio1 = 1.0 * rows / cols;\n    double ratio2 = 1.0 * cols / rows;\n    return (ratio1 + ratio2) / (2.0 * std::max(ratio1, ratio2));\n}\n\ndouble math_utils::square_score(int m, int n, int k) {\n    double score_a = square_score(m, k);\n    double score_b = square_score(k, n);\n    double score_c = square_score(m, n);\n\n    return score_a * score_b * score_c;\n}\n\nstd::pair<int, int> math_utils::invert_cantor_pairing(int z) {\n    int w = (int)std::floor((std::sqrt(8 * z + 1) - 1) / 2);\n    int t = (w * w + w) / 2;\n    int y = z - t;\n    int x = w - y;\n    return {x, y};\n}\n\n// maps (N, N) -> N\nint math_utils::cantor_pairing(const int i, const int j) {\n    int sum = i + j;\n    return (sum * (sum + 1)) / 2 + j;\n}\n\n// check if the number is a power of 2\nbool math_utils::is_power_of_2(std::size_t n) {\n    return !(n & (n - 1));\n}\n\n// find the next power of 
2 that is > than n\nstd::size_t \nmath_utils::next_greater_power_of_2(std::size_t n, std::size_t power_of_2) {\n    return n == 0\n               ? power_of_2 \n               : next_greater_power_of_2(n - (n & power_of_2), power_of_2 << 1);\n}\n\n// find the next power of 2 that is >= n\nstd::size_t \nmath_utils::next_power_of_2(std::size_t n) {\n    return is_power_of_2(n) ? n : next_greater_power_of_2(n);\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/math_utils.hpp",
    "content": "#pragma once\n\n#include <algorithm>\n#include <cmath>\n#include <iostream>\n#include <limits>\n#include <tuple>\n#include <vector>\n#include <complex>\n\nnamespace cosma {\n\n// check if the type in template is std::complex or not\ntemplate<typename T>\nstruct is_complex : std::false_type {};\n\ntemplate<typename T>\nstruct is_complex<std::complex<T>> : std::true_type {};\n\nnamespace math_utils {\n// greates common divisor of a and b\nint gcd(int a, int b);\n\n// divides and rounds up long long integers\nlong long divide_and_round_up(long long x, long long y);\n\n// round to next multiple\nint next_multiple_of(int n_to_round, int multiple);\n\n// check if the number is a power of 2\nbool is_power_of_2(std::size_t n);\n\n// find the next power of 2 that is > than n\nstd::size_t next_greater_power_of_2(std::size_t n, std::size_t power_of_2 = 1);\n\n// find the next power of 2 that is >= n\nstd::size_t next_power_of_2(std::size_t n);\n\n// find all divisors of n\nstd::vector<int> find_divisors(int n);\n// Finds the divisors dm, dn and dk for m, n and k respectively, such that\n// 1. dm * dn * dk <= P\n// 2. dm <= min(m, n, m/local_problem_size)\n// 3. dn <= min(n, k, n/local_problem_size)\n// 5. dk <= min(k, n, k/local_problem_size)\n// 6. balanced: m/dm approx= n/dn approx= k/dk\n//\n// For the upper bound on divisors, the following conditions are taken into account:\n//     - layout-conditions: the matrix that is not split, i.e. 
which does not\n//                          contain the split dimension, must have #columns\n//                          at least as large as the divisor of that dimension\n//     - min-problem-size: the minimum size of the corresponding dimension\n//                         after splitting should be at least min_problem_size\n//     - mathematical: divisor or some dimension should be at least 1 (i.e.\n// \nstd::tuple<int, int, int>\nbalanced_divisors(long long m, long long n, long long k, \n                          int P, int min_problem_size);\n\n// prime decomposition of n\nstd::vector<int> decompose(int n);\n\n// finds divisor of P closest to dimensions/target\nint closest_divisor(int P, int dimension, double target);\n\n// divide numerator by denominator and round it up to int\nint int_div_up(int numerator, int denominator);\n\n// returns a value (0, 1], that describes how close to the square matrix,\n// the matrix with dimensions rows x cols is.\ndouble square_score(int rows, int cols);\n\n// returns a value (0, 1] that describes how close the performance\n// of gemm(m, n, k) is to the performance of a corresponding square case\n// gemm(q, q, q) where q = cubic_root(m*n*k)\ndouble square_score(int m, int n, int k);\n\nint cantor_pairing(const int i, const int j);\nstd::pair<int, int> invert_cantor_pairing(int z);\n}; // namespace math_utils\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/matrix.cpp",
    "content": "#include <cosma/matrix.hpp>\n#include <mpi.h>\n\n#include <complex>\n\nnamespace cosma {\n\nextern template class Buffer<double>;\n\n// using a pointer to cosma_context\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(cosma_context<T> *ctxt,\n                            char label,\n                            const Strategy &strategy,\n                            int rank,\n                            bool dry_run)\n    : ctxt_(ctxt)\n    , mapper_(Mapper(label, strategy, rank))\n    , rank_(mapper_.rank())\n    , strategy_(mapper_.strategy())\n    , label_(mapper_.label())\n    , m_(mapper_.m())\n    , n_(mapper_.n())\n    , P_(mapper_.P()) {\n\n    if (rank < P_) {\n        layout_ = Layout(&mapper_);\n\n        buffer_ =\n            buffer_t(ctxt_, &mapper_, &layout_, dry_run);\n    }\n}\n\n// with given mapper\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(cosma_context<T> *ctxt,\n                            Mapper &&mapper,\n                            int rank,\n                            bool dry_run)\n    : ctxt_(ctxt)\n    , mapper_(std::forward<Mapper>(mapper))\n    , rank_(rank)\n    , strategy_(mapper_.strategy())\n    , label_(mapper_.label())\n    , m_(mapper_.m())\n    , n_(mapper_.n())\n    , P_(mapper_.P()) {\n\n    mapper_.reorder_rank(rank);\n    if (rank < P_) {\n        layout_ = Layout(&mapper_);\n        buffer_ =\n            buffer_t(ctxt_, &mapper_, &layout_, dry_run);\n    }\n}\n\n// using custom context\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(std::unique_ptr<cosma_context<T>> &ctxt,\n                            char label,\n                            const Strategy &strategy,\n                            int rank,\n                            bool dry_run)\n    : CosmaMatrix(ctxt.get(), label, strategy, rank, dry_run) {}\n\n// with given mapper\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(std::unique_ptr<cosma_context<T>> &ctxt,\n                            Mapper &&mapper,\n                 
           int rank,\n                            bool dry_run)\n    : CosmaMatrix(ctxt.get(), std::forward<Mapper &&>(mapper), rank, dry_run) {}\n\n// using global (singleton) context\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(char label,\n                            const Strategy &strategy,\n                            int rank,\n                            bool dry_run)\n    : CosmaMatrix(get_context_instance<T>(), label, strategy, rank, dry_run) {}\n\n// with given mapper\ntemplate <typename T>\nCosmaMatrix<T>::CosmaMatrix(Mapper &&mapper, int rank, bool dry_run)\n    : CosmaMatrix(get_context_instance<T>(),\n                  std::forward<Mapper &&>(mapper),\n                  rank,\n                  dry_run) {}\n\ntemplate <typename T>\nint CosmaMatrix<T>::m() {\n    return m_;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::n() {\n    return n_;\n}\n\ntemplate <typename T>\nchar CosmaMatrix<T>::label() {\n    return label_;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::buffer_index() {\n    if (rank_ < P_) {\n        return buffer_.buffer_index();\n    }\n    return -1;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_buffer_index(int idx) {\n    if (rank_ < P_) {\n        buffer_.set_buffer_index(idx);\n    }\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t *CosmaMatrix<T>::buffer_ptr() {\n    if (rank_ < P_) {\n        return buffer_.buffer_ptr();\n    }\n    return nullptr;\n}\n\ntemplate <typename T>\nsize_t CosmaMatrix<T>::buffer_size() {\n    if (rank_ < P_) {\n        return buffer_.buffer_size();\n    }\n    return 0;\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t *CosmaMatrix<T>::reshuffle_buffer_ptr() {\n    if (rank_ < P_) {\n        return buffer_.reshuffle_buffer_ptr();\n    }\n    return nullptr;\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t *CosmaMatrix<T>::reduce_buffer_ptr() {\n    if (rank_ < P_) {\n        return buffer_.reduce_buffer_ptr();\n    }\n    return 
nullptr;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::swap_reduce_buffer_with(size_t buffer_idx) {\n    if (rank_ < P_) {\n        buffer_.swap_reduce_buffer_with(buffer_idx);\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::advance_buffer() {\n    if (rank_ < P_) {\n        buffer_.advance_buffer();\n    }\n}\n\ntemplate <typename T>\nconst std::vector<Interval2D> &CosmaMatrix<T>::initial_layout(int rank) const {\n    return mapper_.initial_layout(rank);\n}\n\ntemplate <typename T>\nconst std::vector<Interval2D> &CosmaMatrix<T>::initial_layout() const {\n    return mapper_.initial_layout();\n}\n\n// (gi, gj) -> (local_id, rank)\ntemplate <typename T>\nstd::pair<int, int> CosmaMatrix<T>::local_coordinates(int gi, int gj) {\n    return mapper_.local_coordinates(gi, gj);\n}\n\n// (local_id, rank) -> (gi, gj)\ntemplate <typename T>\nstd::pair<int, int> CosmaMatrix<T>::global_coordinates(int local_index,\n                                                       int rank) {\n    return mapper_.global_coordinates(local_index, rank);\n}\n\n// local_id -> (gi, gj) for local elements on the current rank\ntemplate <typename T>\nstd::pair<int, int> CosmaMatrix<T>::global_coordinates(int local_index) {\n    return mapper_.global_coordinates(local_index);\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t *CosmaMatrix<T>::matrix_pointer() {\n    if (rank_ < P_) {\n        return buffer_.initial_buffer_ptr();\n    }\n    return nullptr;\n}\n\ntemplate <typename T>\nconst typename CosmaMatrix<T>::scalar_t *\nCosmaMatrix<T>::matrix_pointer() const {\n    if (rank_ < P_) {\n        return buffer_.initial_buffer_ptr();\n    }\n    return nullptr;\n}\n\ntemplate <typename T>\nsize_t CosmaMatrix<T>::matrix_size() const {\n    return mapper_.initial_size();\n}\n\ntemplate <typename T>\nsize_t CosmaMatrix<T>::matrix_size(int rank) const {\n    return mapper_.initial_size(rank);\n}\n\ntemplate <typename T>\nchar CosmaMatrix<T>::which_matrix() {\n    return 
label_;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::shift(int rank, int seq_bucket) {\n    if (rank < P_) {\n        int offset = layout_.offset(rank, seq_bucket);\n        current_mat += offset;\n        return offset;\n    }\n    return -1;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::shift(int seq_bucket) {\n    if (rank_ < P_) {\n        int offset = layout_.offset(seq_bucket);\n        current_mat += offset;\n        return offset;\n    }\n    return -1;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::unshift(int offset) {\n    if (rank_ < P_) {\n        current_mat -= offset;\n    }\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::seq_bucket(int rank) {\n    if (rank < P_) {\n        return layout_.seq_bucket(rank);\n    }\n    return -1;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::seq_bucket() {\n    if (rank_ < P_) {\n        return layout_.seq_bucket();\n    }\n    return -1;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::update_buckets(Interval &P, Interval2D &range) {\n    if (rank_ < P_) {\n        layout_.update_buckets(P, range);\n    }\n}\n\ntemplate <typename T>\nstd::vector<int> CosmaMatrix<T>::seq_buckets(Interval &newP) {\n    if (rank_ < P_) {\n        return layout_.seq_buckets(newP);\n    }\n    return {};\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_seq_buckets(Interval &newP,\n                                     std::vector<int> &pointers) {\n    if (rank_ < P_) {\n        layout_.set_seq_buckets(newP, pointers);\n    }\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::size(int rank) {\n    if (rank < P_) {\n        return layout_.size(rank);\n    }\n    return 0;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::size() {\n    if (rank_ < P_) {\n        return layout_.size();\n    }\n    return 0;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::buffers_before_expansion(\n    Interval &P,\n    Interval2D &range,\n    std::vector<std::vector<int>> &size_per_rank,\n    std::vector<int> &total_size_per_rank) {\n    
if (rank_ < P_) {\n        layout_.buffers_before_expansion(\n            P, range, size_per_rank, total_size_per_rank);\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::buffers_after_expansion(\n    Interval &P,\n    Interval &newP,\n    std::vector<std::vector<int>> &size_per_rank,\n    std::vector<int> &total_size_per_rank,\n    std::vector<std::vector<int>> &new_size,\n    std::vector<int> &new_total) {\n    if (rank_ < P_) {\n        layout_.buffers_after_expansion(\n            P, newP, size_per_rank, total_size_per_rank, new_size, new_total);\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_sizes(Interval &newP,\n                               std::vector<std::vector<int>> &size_per_rank,\n                               int offset) {\n    if (rank_ < P_) {\n        layout_.set_sizes(newP, size_per_rank, offset);\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_sizes(Interval &newP,\n                               std::vector<std::vector<int>> &size_per_rank) {\n    if (rank_ < P_) {\n        layout_.set_sizes(newP, size_per_rank);\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_sizes(int rank, std::vector<int> &sizes, int start) {\n    if (rank < P_) {\n        layout_.set_sizes(rank, sizes, start);\n    }\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t &CosmaMatrix<T>::\noperator[](const typename std::vector<scalar_t>::size_type index) {\n    if (index < matrix_size()) {\n        std::runtime_error(\"Matrix index out of bounds.\");\n    }\n    return matrix_pointer()[index];\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t CosmaMatrix<T>::\noperator[](const typename std::vector<scalar_t>::size_type index) const {\n    if (index < matrix_size()) {\n        std::runtime_error(\"Matrix index out of bounds.\");\n    }\n    return matrix_pointer()[index];\n}\n\ntemplate <typename T>\ntypename CosmaMatrix<T>::scalar_t *CosmaMatrix<T>::current_matrix() {\n    return 
current_mat;\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::initialize() {\n    current_mat = matrix_pointer();\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::set_current_matrix(scalar_t *mat) {\n    current_mat = mat;\n}\n\ntemplate <typename T>\ncosta::grid_layout<T> CosmaMatrix<T>::get_grid_layout() {\n    // **************************\n    // get an assigned grid2D\n    // **************************\n    auto assigned_grid = mapper_.get_layout_grid();\n\n    // **************************\n    // create local memory view\n    // **************************\n    // get coordinates of current rank in a rank decomposition\n    std::vector<costa::block<T>> loc_blocks;\n    for (auto matrix_id = 0u; matrix_id < mapper_.local_blocks().size();\n         ++matrix_id) {\n        Interval2D range = mapper_.local_blocks()[matrix_id];\n        int offset = mapper_.local_blocks_offsets()[matrix_id];\n\n        costa::interval row_interval(range.rows.first(),\n                                         range.rows.last() + 1);\n        costa::interval col_interval(range.cols.first(),\n                                         range.cols.last() + 1);\n\n        int stride = row_interval.length();\n\n        costa::block<T> b(assigned_grid,\n                              row_interval,\n                              col_interval,\n                              matrix_pointer() + offset,\n                              stride);\n\n        assert(b.non_empty());\n\n        loc_blocks.push_back(b);\n    }\n    costa::local_blocks<T> local_memory(std::move(loc_blocks));\n\n    return {std::move(assigned_grid), std::move(local_memory), 'C'};\n}\n\n// allocates initial buffers (turns off dryrun)\ntemplate <typename T>\nvoid CosmaMatrix<T>::allocate() {\n    if (rank_ < P_) {\n        bool dryrun = false;\n        buffer_.allocate_initial_buffers(dryrun);\n        initialize();\n    }\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::allocate_communication_buffers() {\n    if (rank_ < 
P_)\n        buffer_.allocate_communication_buffers();\n}\n\ntemplate <typename T>\nvoid CosmaMatrix<T>::free_communication_buffers() {\n    if (rank_ < P_)\n        buffer_.free_communication_buffers();\n}\n\ntemplate <typename T>\ncosma_context<T> *CosmaMatrix<T>::get_context() {\n    return ctxt_;\n}\n\ntemplate <typename T>\nint CosmaMatrix<T>::rank() const {\n    return rank_;\n}\n\n// total memory = initial memory + communication memory\ntemplate <typename T>\nstd::vector<size_t> CosmaMatrix<T>::required_memory() {\n    if (rank_ < P_)\n        return buffer_.get_all_buffer_sizes();\n    return std::vector<std::size_t>{};\n}\n\n// Explicit instantiations\n//\ntemplate class CosmaMatrix<float>;\ntemplate class CosmaMatrix<double>;\ntemplate class CosmaMatrix<std::complex<float>>;\ntemplate class CosmaMatrix<std::complex<double>>;\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/matrix.hpp",
    "content": "#pragma once\n#include <algorithm>\n#include <cassert>\n#include <fstream>\n#include <iostream>\n#include <memory>\n#include <numeric>\n#include <set>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <unordered_map>\n#include <vector>\n\n#include <cosma/context.hpp>\n#include <cosma/buffer.hpp>\n#include <cosma/interval.hpp>\n#include <cosma/layout.hpp>\n#include <cosma/mapper.hpp>\n#include <cosma/strategy.hpp>\n\n#include <costa/grid2grid/transform.hpp>\n\nnamespace cosma {\n\ntemplate <typename Scalar>\nclass CosmaMatrix {\n  public:\n    using scalar_t = Scalar;\n    using buffer_t = Buffer<scalar_t>;\n\n    // using a pointer to cosma_context\n    CosmaMatrix(cosma_context<Scalar> *ctxt,\n                char label,\n                const Strategy &strategy,\n                int rank,\n                bool dry_run = false);\n    CosmaMatrix(cosma_context<Scalar> *ctxt,\n                Mapper &&mapper,\n                int rank,\n                bool dry_run = false);\n\n    // using a custom context\n    CosmaMatrix(std::unique_ptr<cosma_context<Scalar>> &ctxt,\n                char label,\n                const Strategy &strategy,\n                int rank,\n                bool dry_run = false);\n    CosmaMatrix(std::unique_ptr<cosma_context<Scalar>> &ctxt,\n                Mapper &&mapper,\n                int rank,\n                bool dry_run = false);\n\n    // using global (singleton) context\n    CosmaMatrix(char label,\n                const Strategy &strategy,\n                int rank,\n                bool dry_run = false);\n    CosmaMatrix(Mapper &&mapper, int rank, bool dry_run = false);\n\n    int m();\n    int n();\n    char label();\n\n    // **********************************************\n    // METHODS FROM mapper.hpp\n    // **********************************************\n    // (gi, gj) -> (local_id, rank)\n    std::pair<int, int> local_coordinates(int gi, int gj);\n    // (local_id, rank) -> (gi, 
gj)\n    std::pair<int, int> global_coordinates(int local_index, int rank);\n    // local_id -> (gi, gj) for local elements on the current rank\n    std::pair<int, int> global_coordinates(int local_index);\n\n    char which_matrix();\n\n    const std::vector<Interval2D> &initial_layout(int rank) const;\n    const std::vector<Interval2D> &initial_layout() const;\n\n    // **********************************************\n    // METHODS FROM layout.hpp\n    // **********************************************\n    int shift(int rank, int seq_bucket);\n    int shift(int seq_bucket);\n    void unshift(int offset);\n\n    void update_buckets(Interval &P, Interval2D &range);\n    int seq_bucket(int rank);\n    int seq_bucket();\n    std::vector<int> seq_buckets(Interval &newP);\n    void set_seq_buckets(Interval &newP, std::vector<int> &pointers);\n\n    int size(int rank);\n    int size();\n\n    void buffers_before_expansion(Interval &P,\n                                  Interval2D &range,\n                                  std::vector<std::vector<int>> &size_per_rank,\n                                  std::vector<int> &total_size_per_rank);\n\n    void buffers_after_expansion(Interval &P,\n                                 Interval &newP,\n                                 std::vector<std::vector<int>> &size_per_rank,\n                                 std::vector<int> &total_size_per_rank,\n                                 std::vector<std::vector<int>> &new_size,\n                                 std::vector<int> &new_total);\n\n    void set_sizes(Interval &newP,\n                   std::vector<std::vector<int>> &size_per_rank,\n                   int offset);\n    void set_sizes(Interval &newP,\n                   std::vector<std::vector<int>> &size_per_rank);\n    void set_sizes(int rank, std::vector<int> &sizes, int start);\n\n    // **********************************************\n    // METHODS FROM buffer.hpp\n    // **********************************************\n    
// prepares next buffer\n    void advance_buffer();\n    // returns the current buffer id\n    int buffer_index();\n    // sets the current buffer to idx\n    void set_buffer_index(int idx);\n    // returns the pointer to the current buffer\n    scalar_t *buffer_ptr();\n    size_t buffer_size();\n    // returns the pointer to the reshuffle buffer\n    // that is used when n_blocks > 1 (i.e. when sequential steps are present)\n    // as a temporary buffer in which the data is reshuffled.\n    scalar_t *reshuffle_buffer_ptr();\n    // pointer to the reduce buffer that is used as a\n    // temporary buffer in parallel-reduce (two-sided) communicator\n    // in case when beta > 0 in that step\n    scalar_t *reduce_buffer_ptr();\n    // swaps current and reduce buffers\n    void swap_reduce_buffer_with(size_t buffer_idx);\n\n    // **********************************************\n    // NEW METHODS\n    // **********************************************\n    scalar_t &operator[](const typename std::vector<scalar_t>::size_type index);\n    scalar_t\n    operator[](const typename std::vector<scalar_t>::size_type index) const;\n\n    // outputs matrix in a format:\n    //      row, column, value\n    // for all local elements on the current rank\n    template <typename Scalar_>\n    friend std::ostream &operator<<(std::ostream &os,\n                                    const CosmaMatrix<Scalar_> &mat);\n\n    // get a pointer to the initial/final data\n    scalar_t *matrix_pointer();\n    const scalar_t *matrix_pointer() const;\n    size_t matrix_size() const;\n    size_t matrix_size(int rank) const;\n\n    // pointer to send buffer\n    // scalar_t* buffer_ptr();\n    // std::vector<scalar_t, mpi_allocator<scalar_t>>& buffer();\n    // pointer to current matrix (send buffer)\n    scalar_t *current_matrix();\n\n    // this should be invoked after all allocations are finished\n    // it will query the memory pool for the current buffers\n    void initialize();\n\n    void 
set_current_matrix(scalar_t *mat);\n\n    costa::grid_layout<scalar_t> get_grid_layout();\n\n    void allocate_communication_buffers();\n    void free_communication_buffers();\n\n    cosma_context<scalar_t> *get_context();\n\n    int rank() const;\n\n    // returns an array of all buffer sizes\n    // that are used for this matrix.\n    // This includes the buffer for initial matrices\n    // as well as additional communication buffers.\n    std::vector<size_t> required_memory();\n\n    // turns off dry-run mode, allocate initial buffers\n    void allocate();\n\n  protected:\n    cosma_context<scalar_t> *ctxt_;\n    // mapper containing information\n    // about the global grid (data layout)\n    Mapper mapper_;\n    // current rank\n    int rank_;\n    // strategy\n    const Strategy &strategy_;\n\n    // A, B or C\n    char label_;\n    /// Number of rows of the global matrix\n    int m_;\n    /// Number of columns of the global matrix\n    int n_;\n    /// Maximum number of rank in the global communicator\n    size_t P_;\n\n    /// temporary local matrix\n    size_t current_mat_id;\n    scalar_t *current_mat;\n\n    Layout layout_;\n    buffer_t buffer_;\n};\n\ntemplate <typename Scalar>\nstd::ostream &operator<<(std::ostream &os, CosmaMatrix<Scalar> &mat) {\n    for (auto local = 0; local < mat.matrix_size(); ++local) {\n        auto value = mat[local];\n        int row, col;\n        std::tie(row, col) = mat.global_coordinates(local);\n        os << row << \" \" << col << \" \" << value << std::endl;\n    }\n    return os;\n}\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/memory_pool.cpp",
    "content": "#include <cassert>\n#include <complex>\n#include <cosma/memory_pool.hpp>\n#include <iostream>\n#include <mpi.h>\n\ntemplate <typename T>\ncosma::memory_pool<T>::memory_pool() {}\n\ntemplate <typename T>\ncosma::memory_pool<T>::~memory_pool() {\n    this->unpin_all();\n}\n\ntemplate <typename T>\nsize_t cosma::memory_pool<T>::get_buffer_id(size_t size) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    assert(size > 0);\n\n    // take the alignment into account\n    if (alignment > 0) {\n        size += aligned_allocator<T>::get_alignment_padding(size);\n    }\n\n    size_t offset = pool_size_;\n    pool_size_ += size;\n    ++n_buffers_;\n\n    assert(alignment <= 0 || aligned_allocator<T>::get_alignment_padding(offset) == 0);\n    assert(alignment <= 0 || aligned_allocator<T>::get_alignment_padding(pool_size_) == 0);\n    return offset;\n}\n\ntemplate <typename T>\nT* cosma::memory_pool<T>::get_buffer_pointer(size_t id) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    assert(alignment <= 0 || aligned_allocator<T>::get_alignment_padding(id) == 0);\n    if (pool_size_ > pool_capacity_) {\n        resize(pool_size_);\n    }\n    assert(id < pool_capacity_);\n    return pool_.data() + id;\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::free_buffer(T* ptr, size_t size) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    // take the alignment into account\n    if (alignment > 0) {\n        size += aligned_allocator<T>::get_alignment_padding(size);\n        assert(aligned_allocator<T>::get_alignment_padding(size) == 0);\n    }\n\n    // std::cout << \"freeing buffer of size \" << size << \", current size =  \" << pool_size_ << std::endl;\n    assert(pool_size_ >= size);\n    pool_size_ -= size;\n    --n_buffers_;\n    // check if this buffer was on top of the memory pool\n    assert(pool_.data() + pool_size_ == ptr);\n    assert(alignment <= 0 || 
aligned_allocator<T>::get_alignment_padding(pool_size_) == 0);\n    // std::fill(ptr, ptr + size, T{});\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::resize(size_t capacity) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    // resizing should always happen after reserve. \n    // The reserve should take care that the reserved\n    // memory is already aligned.\n    assert(alignment <= 0 || aligned_allocator<T>::get_alignment_padding(capacity) == 0);\n\n    this->unpin_all();\n    resized = true;\n    already_pinned = false;\n    try {\n        pool_.resize(capacity);\n    } catch (const std::bad_alloc& e) {\n        std::cout << \"COSMA (memory pool): not enough space. Try setting the CPU memory limit (see environment variable COSMA_CPU_MAX_MEMORY).\" << std::endl;\n        throw;\n    } catch (const std::length_error& e) {\n        std::cout << \"COSMA (memory pool): size >= max_size(). Try setting the CPU memory limit (see environment variable COSMA_CPU_MAX_MEMORY).\" << std::endl;\n        throw;\n    } catch (const std::exception& e) {\n        std::cout << \"COSMA (memory pool): unknown exception, potentially a bug. 
Please inform us of the test-case.\" << std::endl;\n        throw;\n    }\n    pool_size_ = capacity;\n    pool_capacity_ = capacity;\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::reset() {\n    pool_size_ = 0;\n    n_buffers_ = 0;\n    this->unpin_all();\n    resized = false;\n    already_pinned = false;\n}\n\ntemplate <typename T>\nT* cosma::memory_pool<T>::get_pool_pointer() {\n    return pool_.data();\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::turn_on_output() {\n    output = true;\n}\n\ntemplate <typename T>\nsize_t cosma::memory_pool<T>::size() {\n    return pool_size_;\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::reserve(std::vector<size_t>& buffer_sizes) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    // total size of all buffers after aligning\n    std::size_t size = 0;\n    for (auto& buffer_size : buffer_sizes) {\n        if (alignment > 0) {\n            buffer_size += aligned_allocator<T>::get_alignment_padding(buffer_size);\n        }\n        size += buffer_size;\n    }\n\n    // reserve a bit more for amortized resizing\n    size = (std::size_t) std::ceil(size * amortization);\n    // take the alignment into account \n    if (alignment > 0) {\n        size += aligned_allocator<T>::get_alignment_padding(size);\n    }\n\n    if (size > 0 && size > pool_capacity_) {\n        pool_capacity_ = size;\n        assert(alignment <= 0 || aligned_allocator<T>::get_alignment_padding(pool_capacity_) == 0);\n        try {\n            pool_.reserve(pool_capacity_);\n        } catch (const std::bad_alloc& e) {\n            std::cout << \"COSMA (memory pool): not enough space. Try setting the CPU memory limit (see environment variable COSMA_CPU_MAX_MEMORY).\" << std::endl;\n            throw;\n        } catch (const std::length_error& e) {\n            std::cout << \"COSMA (memory pool): size >= max_size(). 
Try setting the CPU memory limit (see environment variable COSMA_CPU_MAX_MEMORY).\" << std::endl;\n            throw;\n        } catch (const std::exception& e) {\n            std::cout << \"COSMA (memory pool): unknown exception, potentially a bug. Please inform us of the test-case.\" << std::endl;\n            throw;\n        }\n    }\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::pin(T* ptr, std::size_t size) {\n    auto alignment = aligned_allocator<T>::get_alignment();\n    if (alignment > 0) {\n        size += aligned_allocator<T>::get_alignment_padding(size);\n    }\n    // check if it's aligned\n    assert(alignment <=0 || aligned_allocator<T>::get_alignment_padding(size) == 0);\n#ifdef COSMA_HAVE_GPU\n    if (!already_pinned) {\n        pinned_buffers_list.add(ptr, size);\n    }\n#endif\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::unpin_all() {\n#ifdef COSMA_HAVE_GPU\n    pinned_buffers_list.clear();\n#endif\n}\n\n#if defined(COSMA_WITH_GPU_AWARE_MPI) || defined(COSMA_WITH_NCCL)\ntemplate <typename T>\nvoid cosma::memory_pool<T>::allocate_device_send_buffer(std::size_t size) {\n    device_send_buffer.resize(size);\n}\n\ntemplate <typename T>\nvoid cosma::memory_pool<T>::allocate_device_receive_buffer(std::size_t size) {\n    device_receive_buffer.resize(size);\n}\n#endif\n\ntemplate class cosma::memory_pool<double>;\ntemplate class cosma::memory_pool<float>;\ntemplate class cosma::memory_pool<std::complex<double>>;\ntemplate class cosma::memory_pool<std::complex<float>>;\n"
  },
  {
    "path": "src/cosma/memory_pool.hpp",
    "content": "#pragma once\n#include <vector>\n#include <cosma/pinned_buffers.hpp>\n#include <cosma/aligned_allocator.hpp>\n\n#ifdef COSMA_HAVE_GPU\n#include <Tiled-MM/device_vector.hpp>\n#endif\n\nnamespace cosma {\ntemplate <typename T>\nclass memory_pool {\npublic:\n    using aligned_vector_t = std::vector<T, aligned_allocator<T>>;\n\n    memory_pool();\n\n    ~memory_pool();\n\n    // since vector can resize at some point,\n    // we don't want to return pointer immediately\n    // instead, when a new buffer is requested,\n    // we return the current offset within the pool\n    // that is used as an id of the buffer.\n    // when the buffer actually used, its pointer\n    // can be retrieved with get_buffer_pointer\n    // that takes the buffer id (i.e. its offset within the pool)\n    // and returns its pointer.\n    size_t get_buffer_id(size_t size);\n    T* get_buffer_pointer(size_t id);\n    void free_buffer(T* ptr, size_t size);\n\n    void resize(size_t capacity);\n    void reset();\n\n    T* get_pool_pointer();\n\n    void turn_on_output();\n\n    size_t size();\n    void reserve(std::vector<size_t>& buffer_sizes);\n\n    void pin(T* ptr, std::size_t size);\n    void unpin_all();\n\n    // if true, buffers for this strategy are already pinned\n    bool already_pinned = false;\n    bool resized = false;\n\n    // scaling factor for the buffer growth\n    double amortization;\n\n#if defined(COSMA_WITH_GPU_AWARE_MPI) || defined(COSMA_WITH_NCCL)\n    void allocate_device_send_buffer(std::size_t size);\n    void allocate_device_receive_buffer(std::size_t size);\n\n    gpu::device_vector<T> device_send_buffer;\n    gpu::device_vector<T> device_receive_buffer;\n#endif\n\nprivate:\n    aligned_vector_t pool_;\n    size_t pool_size_ = 0;\n    size_t pool_capacity_ = 0;\n    size_t n_buffers_ = 0;\n#ifdef COSMA_HAVE_GPU\n    pinned_buffers<T> pinned_buffers_list;\n#endif\n    bool output = false;\n};\n}\n"
  },
  {
    "path": "src/cosma/mpi_mapper.hpp",
    "content": "#pragma once\n\n#include <complex>\n#include <mpi.h>\n\nnamespace cosma {\n\n/**\n * Maps a primitive numeric type to a MPI type.\n *\n * @tparam Scalar the numeric type to be mapped\n */\ntemplate <typename Scalar>\nstruct mpi_mapper {\n  static inline MPI_Datatype getType();\n};\n\ntemplate <>\ninline MPI_Datatype mpi_mapper<double>::getType() {\n  return MPI_DOUBLE;\n}\n\ntemplate <>\ninline MPI_Datatype mpi_mapper<float>::getType() {\n  return MPI_FLOAT;\n}\n\ntemplate <>\ninline MPI_Datatype mpi_mapper<std::complex<float>>::getType() {\n  return MPI_C_FLOAT_COMPLEX;\n}\n\ntemplate <>\ninline MPI_Datatype mpi_mapper<std::complex<double>>::getType() {\n  return MPI_C_DOUBLE_COMPLEX;\n}\n\n// Removes const qualifier\n//\ntemplate <typename Scalar>\nstruct mpi_mapper<const Scalar> {\n  static inline MPI_Datatype getType();\n};\n\ntemplate <typename Scalar>\ninline MPI_Datatype mpi_mapper<const Scalar>::getType() {\n  return mpi_mapper<Scalar>::getType();\n}\n\n} // end namespace cosma\n"
  },
  {
    "path": "src/cosma/multiply.cpp",
    "content": "#include <cosma/math_utils.hpp>\n#include <cosma/local_multiply.hpp>\n#include <cosma/multiply.hpp>\n#include <cosma/profiler.hpp>\n#include <costa/grid2grid/ranks_reordering.hpp>\n#include <costa/grid2grid/transformer.hpp>\n\n#include <complex>\n\n#if defined(COSMA_HAVE_GPU) && defined(COSMA_WITH_NCCL)\n#include <cosma/gpu/nccl_utils.hpp>\n#endif\n\n#if defined(COSMA_HAVE_GPU) && defined(COSMA_WITH_GPU_AWARE_MPI)\n#include <cosma/gpu/gpu_aware_mpi_utils.hpp>\n#endif\n\nnamespace cosma {\ntemplate <typename Scalar>\nvoid multiply(cosma_context<Scalar> *ctx,\n              CosmaMatrix<Scalar> &A,\n              CosmaMatrix<Scalar> &B,\n              CosmaMatrix<Scalar> &C,\n              Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              size_t step,\n              const Strategy &strategy,\n              communicator *comm,\n              Scalar alpha,\n              Scalar beta);\n\ntemplate <typename Scalar>\nvoid sequential(cosma_context<Scalar> *ctx,\n                CosmaMatrix<Scalar> &A,\n                CosmaMatrix<Scalar> &B,\n                CosmaMatrix<Scalar> &C,\n                Interval &m,\n                Interval &n,\n                Interval &k,\n                Interval &P,\n                size_t step,\n                const Strategy &strategy,\n                communicator *comm,\n                Scalar alpha,\n                Scalar beta);\n\ntemplate <typename Scalar>\nvoid parallel(cosma_context<Scalar> *ctx,\n              CosmaMatrix<Scalar> &A,\n              CosmaMatrix<Scalar> &B,\n              CosmaMatrix<Scalar> &C,\n              Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              size_t step,\n              const Strategy &strategy,\n              communicator *comm,\n              Scalar alpha,\n              Scalar beta);\n\ntemplate <typename T>\nvoid multiply_using_layout(costa::grid_layout<T> 
&A,\n                           costa::grid_layout<T> &B,\n                           costa::grid_layout<T> &C,\n                           T alpha,\n                           T beta,\n                           char transa,\n                           char transb,\n                           MPI_Comm comm) {\n    multiply_using_layout<T>(\n        get_context_instance<T>(), A, B, C, alpha, beta, transa, transb, comm);\n}\n\ntemplate <typename T>\nvoid multiply_using_layout(cosma_context<T> *ctx,\n                           costa::grid_layout<T> &A,\n                           costa::grid_layout<T> &B,\n                           costa::grid_layout<T> &C,\n                           T alpha,\n                           T beta,\n                           char transa,\n                           char transb,\n                           MPI_Comm comm) {\n    assert(A.num_cols() == B.num_rows());\n\n    // Note: `k` is always the shared dimension.\n    //\n    int m = A.num_rows();\n    int n = B.num_cols();\n    int k = A.num_cols();\n\n    // **********************************\n    //           CORNER CASES\n    // **********************************\n    // edge cases, which are allowed by the standard\n    if (m == 0 || n == 0) return;\n    // afterwards we are sure m != 0 and n != 0\n    if (k == 0 || alpha == T{0}) {\n        // scale matrix C by beta\n        // starting from (ic-1, jc-1)\n        C.scale_by(beta);\n        return;\n    }\n    if (beta == T{0}) {\n        C.fill(beta);\n    }\n\n    char trans_a = std::toupper(transa);\n    char trans_b = std::toupper(transb);\n\n    int rank, P;\n    MPI_Comm_rank(comm, &rank);\n    MPI_Comm_size(comm, &P);\n\n    // find an optimal strategy for this problem\n    Strategy strategy(m, n, k, P);\n    // enable overlapping communication and computation if turned on\n    if (get_context_instance<T>()->overlap_comm_and_comp) {\n        strategy.enable_overlapping_comm_and_comp();\n    }\n\n    // create COSMA 
mappers\n    Mapper mapper_a('A', strategy, rank);\n    Mapper mapper_b('B', strategy, rank);\n    Mapper mapper_c('C', strategy, rank);\n\n    // get abstract grid for COSMA layout\n    auto cosma_grid_a = mapper_a.get_layout_grid();\n    auto cosma_grid_b = mapper_b.get_layout_grid();\n    auto cosma_grid_c = mapper_c.get_layout_grid();\n\n    // total communication volume for transformation of layouts\n    auto comm_vol = costa::communication_volume(A.grid, cosma_grid_a, 'N');\n    comm_vol += costa::communication_volume(B.grid, cosma_grid_b, 'N');\n    comm_vol += costa::communication_volume(cosma_grid_c, C.grid, 'N');\n\n    // compute the optimal rank reordering that minimizes the communication\n    // volume\n    bool reordered = false;\n    std::vector<int> rank_permutation =\n        costa::optimal_reordering(comm_vol, P, reordered);\n    // create reordered communicator, which has same ranks\n    // but relabelled as given by the rank_permutation\n    // (to avoid the communication during layout transformation)\n    PE(transform_reordering_comm);\n    MPI_Comm reordered_comm = comm;\n    if (reordered) {\n        MPI_Comm_split(comm, 0, rank_permutation[rank], &reordered_comm);\n    }\n    PL();\n\n    CosmaMatrix<T> A_cosma(ctx, std::move(mapper_a), rank_permutation[rank]);\n    CosmaMatrix<T> B_cosma(ctx, std::move(mapper_b), rank_permutation[rank]);\n    CosmaMatrix<T> C_cosma(ctx, std::move(mapper_c), rank_permutation[rank]);\n\n    // avoid resizing the buffer by reserving immediately the total required memory\n    // collect sizes of all buffers that are going to be allocated for each matrix\n    auto A_buffers = A_cosma.required_memory();\n    auto B_buffers = B_cosma.required_memory();\n    auto C_buffers = C_cosma.required_memory();\n    std::vector<std::size_t> buffer_sizes;\n    int n_buffers = A_buffers.size() + B_buffers.size() + C_buffers.size();\n    if (n_buffers > 0) {\n        buffer_sizes.reserve(n_buffers);\n        
std::copy(A_buffers.begin(), A_buffers.end(), std::back_inserter(buffer_sizes));\n        std::copy(B_buffers.begin(), B_buffers.end(), std::back_inserter(buffer_sizes));\n        std::copy(C_buffers.begin(), C_buffers.end(), std::back_inserter(buffer_sizes));\n\n        // allocate all buffers in the memory pool\n        get_context_instance<T>()->get_memory_pool().reserve(buffer_sizes);\n    }\n\n    // get abstract layouts for COSMA layout\n    auto cosma_layout_a = A_cosma.get_grid_layout();\n    auto cosma_layout_b = B_cosma.get_grid_layout();\n    auto cosma_layout_c = C_cosma.get_grid_layout();\n\n    cosma_layout_a.reorder_ranks(rank_permutation);\n    cosma_layout_b.reorder_ranks(rank_permutation);\n    cosma_layout_c.reorder_ranks(rank_permutation);\n\n    // schedule A and B transforms together from given layout to cosma layout\n    costa::transformer<T> transf(comm);\n    transf.schedule(A, cosma_layout_a, trans_a, T{1}, T{0});\n    transf.schedule(B, cosma_layout_b, trans_b, T{1}, T{0});\n    // transform all scheduled transformations together\n    transf.transform();\n\n    // perform cosma multiplication\n    // auto ctx = cosma::make_context<T>();\n    multiply<T>(\n        ctx, A_cosma, B_cosma, C_cosma, strategy, reordered_comm, T{1}, T{0});\n\n    // construct cosma layout again, to avoid outdated\n    // pointers when the memory pool has been used\n    // in case it resized during multiply\n    cosma_layout_c = C_cosma.get_grid_layout();\n    cosma_layout_c.reorder_ranks(rank_permutation);\n    // transform the result back\n    transf.schedule(cosma_layout_c, C, 'N', alpha, beta);\n    transf.transform();\n\n    // free up the reordered communicator\n    PE(transform_reordering_comm);\n    if (reordered) {\n        MPI_Comm_free(&reordered_comm);\n    }\n    PL();\n\n}\n\n/*\n Compute C = alpha*A*B + beta*C\n Assumption: we assume that at each step only 1 dimension is split\n*/\n\n// using the context from matrices\ntemplate <typename 
Scalar>\nvoid multiply(CosmaMatrix<Scalar> &matrixA,\n              CosmaMatrix<Scalar> &matrixB,\n              CosmaMatrix<Scalar> &matrixC,\n              const Strategy &strategy,\n              MPI_Comm comm,\n              Scalar alpha,\n              Scalar beta) {\n    assert(matrixA.get_context() == matrixB.get_context() &&\n           matrixB.get_context() == matrixC.get_context());\n    multiply(matrixA.get_context(),\n             matrixA,\n             matrixB,\n             matrixC,\n             strategy,\n             comm,\n             alpha,\n             beta);\n}\n\n// using the given context\ntemplate <typename Scalar>\nvoid multiply(cosma_context<Scalar> *ctx,\n              CosmaMatrix<Scalar> &matrixA,\n              CosmaMatrix<Scalar> &matrixB,\n              CosmaMatrix<Scalar> &matrixC,\n              const Strategy &strategy,\n              MPI_Comm comm,\n              Scalar alpha,\n              Scalar beta) {\n    // edge cases, which are allowed by the standard (m, n or k can be 0)\n    if (strategy.m == 0 || strategy.n == 0 || strategy.k == 0) {\n        return;\n    }\n\n    // register reusable objects in the context\n    ctx->register_state(comm, strategy);\n    if (comm == MPI_COMM_NULL || ctx->get_cosma_comm()->is_idle()) {\n\treturn;\n    }\n\n    Interval mi = Interval(0, strategy.m - 1);\n    Interval ni = Interval(0, strategy.n - 1);\n    Interval ki = Interval(0, strategy.k - 1);\n    Interval Pi = Interval(0, strategy.P - 1);\n\n    PE(preprocessing_allocation);\n\n    // allocate buffers used for communication\n    matrixA.allocate_communication_buffers();\n    matrixB.allocate_communication_buffers();\n    matrixC.allocate_communication_buffers();\n\n    // once all buffers are allocated from the memory pool\n    // we know that the memory pool will not be resized\n    // and thus we can safely set the pointer to the\n    // initial buffers in all matrices.\n    matrixA.initialize();\n    matrixB.initialize();\n    
matrixC.initialize();\n\n    // check if all the local matrices belong to\n    // the current rank\n    assert(matrixA.rank() == matrixB.rank());\n    assert(matrixB.rank() == matrixC.rank());\n    PL();\n\n    multiply(ctx,\n    \t matrixA,\n    \t matrixB,\n    \t matrixC,\n    \t mi,\n    \t ni,\n    \t ki,\n    \t Pi,\n    \t 0,\n    \t strategy,\n    \t ctx->get_cosma_comm(),\n    \t alpha,\n    \t beta);\n\n    // deallocate buffers used for communication\n    // since its a stack allocator, we deallocate\n    // in the opposite order than when we allocated\n    PE(preprocessing_allocation);\n    matrixC.free_communication_buffers();\n    matrixB.free_communication_buffers();\n    matrixA.free_communication_buffers();\n    PL();\n\n    if (ctx->get_cosma_comm()->rank() == 0) {\n        PP();\n    }\n}\n\ntemplate <typename Scalar>\nvoid multiply(cosma_context<Scalar> *ctx,\n              CosmaMatrix<Scalar> &matrixA,\n              CosmaMatrix<Scalar> &matrixB,\n              CosmaMatrix<Scalar> &matrixC,\n              Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              size_t step,\n              const Strategy &strategy,\n              communicator *comm,\n              Scalar alpha,\n              Scalar beta) {\n    PE(multiply_other);\n#ifdef DEBUG\n    std::cout << \"matrix A, buffer index = \" << matrixA.buffer_index()\n              << std::endl;\n    std::cout << \"matrix B, buffer index = \" << matrixB.buffer_index()\n              << std::endl;\n    std::cout << \"matrix C, buffer index = \" << matrixC.buffer_index()\n              << std::endl;\n#endif\n\n    // current submatrices that are being computed\n    Interval2D a_range(m, k);\n    Interval2D b_range(k, n);\n    Interval2D c_range(m, n);\n\n    // For each of P processors remember which sequential bucket we are\n    // currently on\n    std::vector<int> bucketA = matrixA.seq_buckets(P);\n    std::vector<int> bucketB = 
matrixB.seq_buckets(P);\n    std::vector<int> bucketC = matrixC.seq_buckets(P);\n\n    // Skip all buckets that are \"before\" the current submatrices.\n    // the relation submatrix1 <before> submatrix2 is defined in Interval2D.\n    // Intuitively, this will skip all the buckets that are \"above\" or \"on the\n    // left\" of the current submatrices. We say \"before\" because whenever we\n    // split sequentially, we always first start with the \"above\" submatrix (if\n    // the splitting is horizontal) or with the left one (if the splitting is\n    // vertical). which explains the name of the relation \"before\".\n    matrixA.update_buckets(P, a_range);\n    matrixB.update_buckets(P, b_range);\n    matrixC.update_buckets(P, c_range);\n\n    // This iterates over the skipped buckets and sums up their sizes,\n    // and increases the pointer of the current matrix for the offset\n    int offsetA = matrixA.shift(bucketA[comm->relative_rank(P)]);\n    int offsetB = matrixB.shift(bucketB[comm->relative_rank(P)]);\n    int offsetC = matrixC.shift(bucketC[comm->relative_rank(P)]);\n    PL();\n\n    if (strategy.final_step(step) || strategy.empty()) {\n        bool copy_c_back = true;\n        bool nccl_enabled = false;\n        bool gpu_aware_mpi_enabled = false;\n\n#ifdef COSMA_WITH_NCCL\n        nccl_enabled = true;\n#endif\n#ifdef COSMA_WITH_GPU_AWARE_MPI\n        gpu_aware_mpi_enabled = true;\n#endif\n\n        if (gpu_aware_mpi_enabled || nccl_enabled) {\n            copy_c_back = !(step > 0 && strategy.parallel_step(step-1) && strategy.split_k(step-1));\n        }\n\n        local_multiply(ctx,\n                       matrixA.current_matrix(),\n                       matrixB.current_matrix(),\n                       matrixC.current_matrix(),\n                       m.length(),\n                       n.length(),\n                       k.length(),\n                       alpha,\n                       beta,\n                       copy_c_back);\n    } else {\n  
      if (strategy.parallel_step(step)) {\n            if (strategy.should_overlap_comm_and_comp(step)) {\n                comm->overlap_comm_and_comp(ctx,\n                                           matrixA,\n                                           matrixB,\n                                           matrixC,\n                                           m,\n                                           n,\n                                           k,\n                                           P,\n                                           step,\n                                           alpha,\n                                           beta);\n                // parallel(matrixA, matrixB, matrixC, m, n, k, P, step,\n                // strategy, comm, beta);\n            } else {\n                parallel(ctx,\n                         matrixA,\n                         matrixB,\n                         matrixC,\n                         m,\n                         n,\n                         k,\n                         P,\n                         step,\n                         strategy,\n                         comm,\n                         alpha,\n                         beta);\n            }\n        } else {\n            sequential(ctx,\n                       matrixA,\n                       matrixB,\n                       matrixC,\n                       m,\n                       n,\n                       k,\n                       P,\n                       step,\n                       strategy,\n                       comm,\n                       alpha,\n                       beta);\n        }\n    }\n\n    PE(multiply_other);\n    // shift the pointers of the current matrix back\n    matrixA.unshift(offsetA);\n    matrixB.unshift(offsetB);\n    matrixC.unshift(offsetC);\n\n    // Revert the buckets pointers to their previous values.\n    matrixA.set_seq_buckets(P, bucketA);\n    matrixB.set_seq_buckets(P, bucketB);\n    
matrixC.set_seq_buckets(P, bucketC);\n    PL();\n}\n\n/*\n In each sequential step, one of the dimensions is split,\n and each of the subproblems is solved sequentially by all P processors.\n*/\ntemplate <typename Scalar>\nvoid sequential(cosma_context<Scalar> *ctx,\n                CosmaMatrix<Scalar> &matrixA,\n                CosmaMatrix<Scalar> &matrixB,\n                CosmaMatrix<Scalar> &matrixC,\n                Interval &m,\n                Interval &n,\n                Interval &k,\n                Interval &P,\n                size_t step,\n                const Strategy &strategy,\n                communicator *comm,\n                Scalar alpha,\n                Scalar beta) {\n    // split the dimension but not the processors, all P processors are taking\n    // part in each substep.\n    if (strategy.split_m(step)) {\n        for (int M = 0; M < strategy.divisor(step); ++M) {\n            Interval newm = m.subinterval(strategy.divisor(step), M);\n            multiply(ctx,\n                     matrixA,\n                     matrixB,\n                     matrixC,\n                     newm,\n                     n,\n                     k,\n                     P,\n                     step + 1,\n                     strategy,\n                     comm,\n                     alpha,\n                     beta);\n            // this only affects the GPU backend.\n            // if sequential steps are used, then each sequential step \n            // is reusing the same communication buffers. \n            // However, if the strategy contains steps \n            // which are not perfectly divisible then\n            // this might result in each sequential step requiring\n            // slightly different pointers to be pinned and thus\n            // we cannot reuse the already pinned buffers from\n            // the previous sequential step. 
We have to unpin\n            // all the buffers from the previous step, to avoid\n            // getting the GPU runtime error that \n            // some part of the buffer is already pinned.\n            if (strategy.irregular) {\n                ctx->get_memory_pool().unpin_all();\n            }\n        }\n        return;\n    }\n\n    if (strategy.split_n(step)) {\n        for (int N = 0; N < strategy.divisor(step); ++N) {\n            Interval newn = n.subinterval(strategy.divisor(step), N);\n            multiply(ctx,\n                     matrixA,\n                     matrixB,\n                     matrixC,\n                     m,\n                     newn,\n                     k,\n                     P,\n                     step + 1,\n                     strategy,\n                     comm,\n                     alpha,\n                     beta);\n            // this only affects the GPU backend.\n            // if sequential steps are used, then each sequential step \n            // is reusing the same communication buffers. \n            // However, if the strategy contains steps \n            // which are not perfectly divisible then\n            // this might result in each sequential step requiring\n            // slightly different pointers to be pinned and thus\n            // we cannot reuse the already pinned buffers from\n            // the previous sequential step. We have to unpin\n            // all the buffers from the previous step, to avoid\n            // getting the GPU runtime error that \n            // some part of the buffer is already pinned.\n            if (strategy.irregular) {\n                ctx->get_memory_pool().unpin_all();\n            }\n        }\n        return;\n    }\n\n    // if divided by k, then the result of each subproblem is just a partial\n    // result for C which should all be summed up. 
We solve this by letting beta\n    // parameter be 1 in the substeps that follow so that dgemm automatically\n    // adds up the subsequent results to the previous partial results of C.\n    if (strategy.split_k(step)) {\n        for (int K = 0; K < strategy.divisor(step); ++K) {\n            Interval newk = k.subinterval(strategy.divisor(step), K);\n            auto new_beta = beta;\n            if (K != 0) {\n                new_beta = Scalar{1};\n            }\n            multiply(ctx,\n                     matrixA,\n                     matrixB,\n                     matrixC,\n                     m,\n                     n,\n                     newk,\n                     P,\n                     step + 1,\n                     strategy,\n                     comm,\n                     alpha,\n                     new_beta);\n            // this only affects the GPU backend.\n            // if sequential steps are used, then each sequential step \n            // is reusing the same communication buffers. \n            // However, if the strategy contains steps \n            // which are not perfectly divisible then\n            // this might result in each sequential step requiring\n            // slightly different pointers to be pinned and thus\n            // we cannot reuse the already pinned buffers from\n            // the previous sequential step. 
We have to unpin\n            // all the buffers from the previous step, to avoid\n            // getting the GPU runtime error that \n            // some part of the buffer is already pinned.\n            if (strategy.irregular) {\n                ctx->get_memory_pool().unpin_all();\n            }\n        }\n        return;\n    }\n}\n\ntemplate <typename T>\nT which_is_expanded(T &&A,\n                    T &&B,\n                    T &&C,\n                    const Strategy &strategy,\n                    size_t step) {\n    // divn > 1 => divm==divk==1 => matrix A has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_n(step))\n        return std::forward<T>(A);\n\n    // divm > 1 => divk==divn==1 => matrix B has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_m(step))\n        return std::forward<T>(B);\n\n    // divk > 1 => divm==divn==1 => matrix C has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_k(step))\n        return std::forward<T>(C);\n\n    // this should never happen\n    return std::forward<T>(C);\n}\n\n/*\n In a parallel step one of the dimensions is split into \"div\" pieces.\n Also, ranks P are split into \"div\" groups of \"newP\" processors\n such that each group of ranks is in charge of one piece of the split matrix.\n\n * if m split:\n Split matrix A and copy matrix B such that each of the \"div\" groups with newP\n processors contains the whole matrix B (that was previously owned by P\n processors). 
Communication is necessary since we want that newP<P processors\n own what was previously owned by P processors After the communication, each\n group of processors will contain identical data of matrix B in exactly the same\n order in all groups.\n\n * if n split:\n Split matrix B and copy matrix A such that each of the \"div\" groups with newP\n processors contains the whole matrix A (that was previously owned by P\n processors). Communication is necessary since we want that newP<P processors\n own what was previously owned by P processors After the communication, each\n group of processors will contain identical data of matrix A in exactly the same\n order in all groups.\n\n * if k split:\n Split both matrices A and B (since both of these matrices own dimension k).\n After the substep, each of \"div\" groups with newP processors will own\n a partial result of matrix C which should all be summed up (reduced) and\n splitted equally among all P processors. Thus, here we first sum up all the\n partial results that are owned by newP processors, and then we split it equally\n among P processors. 
While in the previous two cases we had to expand local\n matrices (since newP processors had to own what was previously owned by P\n processors) here we have the opposite - P ranks should own what was previously\n owned by newP ranks - thus local matrices are shrinked.\n */\ntemplate <typename Scalar>\nvoid parallel(cosma_context<Scalar> *ctx,\n              CosmaMatrix<Scalar> &matrixA,\n              CosmaMatrix<Scalar> &matrixB,\n              CosmaMatrix<Scalar> &matrixC,\n              Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              size_t step,\n              const Strategy &strategy,\n              communicator *comm,\n              Scalar alpha,\n              Scalar beta) {\n    PE(multiply_other);\n    int divisor = strategy.divisor(step);\n    int divisor_m = strategy.divisor_m(step);\n    int divisor_n = strategy.divisor_n(step);\n    int divisor_k = strategy.divisor_k(step);\n    // processor subinterval which the current rank belongs to\n    int partition_idx = P.subinterval_index(divisor, comm->rank());\n    Interval newP = P.subinterval(divisor, partition_idx);\n    // intervals of M, N and K that the current rank is in charge of,\n    // together with other ranks from its group.\n    // (see the definition of group and offset below)\n    Interval newm = m.subinterval(divisor_m, divisor_m > 1 ? partition_idx : 0);\n    Interval newn = n.subinterval(divisor_n, divisor_n > 1 ? partition_idx : 0);\n    Interval newk = k.subinterval(divisor_k, divisor_k > 1 ? partition_idx : 0);\n\n    /*\n     * size_before_expansion:\n     maps rank i from interval P to the vector [bucket1.size(),\n     bucket2.size()...] 
containing buckets which are inside \"range\" that rank i\n     owns\n\n     * total_before_expansion:\n     maps rank i from interval P to the sum of all buckets inside\n     size_before_expansion[i]\n\n     * size_after_expansion:\n     maps rank i from interval newP to the vector of [bucket1.size(),\n     bucket2.size()...] but each bucket here is expanded, i.e. each bucket size\n     in this vector is actually the sum of the sizes of this bucket in all the\n     ranks from the communication ring of the current rank.\n\n     * total_after_expansion:\n     maps rank i from interval P to the sum of all buckets inside\n     size_after_expansion[i]\n     */\n    std::vector<std::vector<int>> size_before_expansion(P.length());\n    std::vector<int> total_before_expansion(P.length());\n    std::vector<std::vector<int>> size_after_expansion(newP.length());\n    std::vector<int> total_after_expansion(newP.length());\n\n    /*\n     * this gives us the 2D interval of the matrix that will be expanded:\n     if divm > 1 => matrix B expanded => Interval2D(k, n)\n     if divn > 1 => matrix A expanded => Interval2D(m, k)\n     if divk > 1 => matrix C expanded => Interval2D(m, n)\n     */\n    Interval row_copy = which_is_expanded(m, k, m, strategy, step);\n    Interval col_copy = which_is_expanded(k, n, n, strategy, step);\n    Interval2D range(row_copy, col_copy);\n\n    /*\n     * this gives us a matrix that is expanded:\n     if divm > 1 => matrix B is expanded\n     if divn > 1 => matrix A is expanded\n     if divk > 1 => matrix C is expanded\n     */\n    CosmaMatrix<Scalar> &expanded_mat =\n        which_is_expanded(matrixA, matrixB, matrixC, strategy, step);\n    // gets the buffer sizes before and after expansion.\n    // this still does not modify the buffer sizes inside layout\n    // it just tells us what they would be.\n    expanded_mat.buffers_before_expansion(\n        P, range, size_before_expansion, total_before_expansion);\n\n    
expanded_mat.buffers_after_expansion(P,\n                                         newP,\n                                         size_before_expansion,\n                                         total_before_expansion,\n                                         size_after_expansion,\n                                         total_after_expansion);\n\n    // increase the buffer sizes before the substeps\n    expanded_mat.set_sizes(newP, size_after_expansion);\n    // this is the sum of sizes of all the buckets after expansion\n    // that the current rank will own.\n    // which is also the size of the matrix after expansion\n    int new_size = total_after_expansion[comm->relative_rank(newP)];\n\n    int buffer_idx = expanded_mat.buffer_index();\n    expanded_mat.advance_buffer();\n\n    Scalar *original_matrix = expanded_mat.current_matrix();\n    Scalar *expanded_matrix = expanded_mat.buffer_ptr();\n    Scalar *reshuffle_buffer = expanded_mat.reshuffle_buffer_ptr();\n\n    // pack the data for the next substep\n    expanded_mat.set_current_matrix(expanded_matrix);\n    PL();\n\n    // if divided along m or n then copy original matrix inside communication\n    // ring to get the expanded matrix (all ranks inside communication ring\n    // should own exactly the same data in the expanded matrix.\n    if (strategy.split_m(step) || strategy.split_n(step)) {\n        // copy the matrix that wasn't divided in this step\n#ifdef COSMA_WITH_NCCL\n        cosma::gpu::nccl_copy(ctx,\n                              P,\n                              original_matrix,\n                              expanded_matrix,\n                              reshuffle_buffer,\n                              size_before_expansion,\n                              total_before_expansion,\n                              new_size,\n                              step);\n#elif COSMA_WITH_GPU_AWARE_MPI\n        cosma::gpu::gpu_aware_mpi_copy(\n                              ctx,\n                          
    P,\n                              original_matrix,\n                              expanded_matrix,\n                              reshuffle_buffer,\n                              size_before_expansion,\n                              total_before_expansion,\n                              new_size,\n                              step);\n#else\n        comm->copy(P,\n                  original_matrix,\n                  expanded_matrix,\n                  reshuffle_buffer,\n                  size_before_expansion,\n                  total_before_expansion,\n                  new_size,\n                  step);\n#endif\n    }\n\n    // if division by k, and we are in the branch where beta > 0, then\n    // reset beta to 0, but keep in mind that on the way back from the substeps\n    // we will have to sum the result with the local data in C\n    // this is necessary since reduction happens AFTER the substeps\n    // so we cannot pass beta = 1 if the data is not present there BEFORE the\n    // substeps.\n    auto new_beta = beta;\n    if (strategy.split_k(step) && beta != Scalar{0}) {\n        new_beta = Scalar{0};\n\n        //*******************************************\n        // swaping reduce_buffer and original_matrix\n        //*******************************************\n        /* Why this is necessary:\n           Assume the case: (m, n, k, P) = (4, 4, 8, 4),\n           with strategy being: -s pk2,pk2 and beta = 1.0\n\n           In this case, matrix C will only allocate 3 buffers: \n           1) initial buffer (send buffer)\n           2) communication buffer (receive buffer)\n           3) reduce_buffer (temporary buffer)\n\n           In the first parallel step, the following will happen:\n           1) beta = 1.0, but new_beta = 0.0;\n           2) buffer_index will point to communication buffer\n\n           In the second parallel step:\n           1) beta = 0.0\n           2) initial and communication buffers will swap:\n           the 
communication buffer will become the new send buffer\n           and the initial buffer will become the new receive buffer\n\n           This means that the initial buffer will be overwritten\n           with partial results of the nested parallel k/2 step.\n\n           Then, when the outer parallel step wants to accumulate: \n           C = beta * C + sum(partial results)\n           However, the values in C are not anymore the initial values, \n           but the values of the inner reduction which overwrote C.\n\n           This happens when all following conditions are met:\n           1) beta > 0:\n                When beta == 0, then we can easily overwite C with\n                intermediate results, without worring about loosing the data.\n           2) There are no sequential m or n divisions before parallel k steps:\n                When sequential m or n divisions occur, then they\n                enforce new buffers to be used in subsequent communications\n                and thus the initial values in C stay preserved.\n           3) Parallel k division occurs more than once:\n                If parallel k step occurs only once, then\n                buffer swapping never occurs, since we only\n                have one communication round of matrix C,\n                so the initial buffer only serves as the send buffer.\n\n           When none of these conditions are met, we have to:\n           - either: \n             1) allocate an additional communication buffer\n                so that the initial buffer never gets written to\n                during communication rounds\n           - or:\n             2) preserve the initial content of C temporarily\n                in a temporary buffer (e.g. 
in a reduce_buffer)\n                given that the temp buffer is not used \n                in any subsequent step.\n\n           Here we chose to take the option 2).\n\n           We temporarily swap the original_matrix\n           (possibly containing the initial values of C)\n           with the reduce buffer that is not used in nested steps.\n\n           We can do this, because the following is guaranteed:\n           1) if beta > 0 in some parallel step where k is divided\n              then all nested steps (both parallel and sequential) \n              will have beta = 0 (since we set new_beta = 0).\n           2) size(reduce_buffer) >= size(initial C buffer)\n           3) Even if there is only 1 parallel k step\n              (and thus the stated problem is not present)\n              swapping these buffers will not hurt.\n        */\n        expanded_mat.swap_reduce_buffer_with(buffer_idx);\n    }\n\n    multiply(ctx,\n             matrixA,\n             matrixB,\n             matrixC,\n             newm,\n             newn,\n             newk,\n             newP,\n             step + 1,\n             strategy,\n             comm,\n             alpha,\n             new_beta);\n\n#ifdef DEBUG\n    std::cout << \"rank = \" << comm->rank() << \", label = \" << expanded_mat.label() << std::endl;\n    if (comm->rank() == 0 && expanded_mat.label() == 'C') {\n        std::cout << \"expanded matrix after multiply: \" << std::endl;\n        int local_size = size_before_expansion[comm->rank() - P.first()][0];\n        for (int i = 0; i < local_size; ++i) {\n            std::cout << *(expanded_mat.current_matrix() + i) << \", \";\n        }\n        std::cout << std::endl;\n        std::cout << \"buff_idx = \" << buffer_idx << std::endl;\n    }\n#endif\n\n    // revert changes after the recurrsion\n    if (strategy.split_k(step) && beta != Scalar{0}) {\n        // swap reduce_buffer with original matrix C\n        
expanded_mat.swap_reduce_buffer_with(buffer_idx);\n    }\n\n    // revert the current matrix\n    expanded_mat.set_buffer_index(buffer_idx);\n    expanded_mat.set_current_matrix(original_matrix);\n\n    // if division by k do additional reduction of C\n    if (strategy.split_k(step)) {\n        Scalar *reduce_buffer = expanded_mat.reduce_buffer_ptr();\n#ifdef COSMA_WITH_NCCL\n        bool copy_c_back = !strategy.final_step(step+1);\n        cosma::gpu::nccl_reduce(ctx,\n                                P,\n                                expanded_matrix,\n                                original_matrix,\n                                reshuffle_buffer,\n                                reduce_buffer,\n                                size_before_expansion,\n                                total_before_expansion,\n                                size_after_expansion,\n                                total_after_expansion,\n                                beta,\n                                step,\n                                copy_c_back);\n#elif COSMA_WITH_GPU_AWARE_MPI\n        bool copy_c_back = !strategy.final_step(step+1);\n        cosma::gpu::gpu_aware_mpi_reduce(\n                                ctx,\n                                P,\n                                expanded_matrix,\n                                original_matrix,\n                                reshuffle_buffer,\n                                reduce_buffer,\n                                size_before_expansion,\n                                total_before_expansion,\n                                size_after_expansion,\n                                total_after_expansion,\n                                beta,\n                                step,\n                                copy_c_back);\n#else\n        comm->reduce(P,\n                    expanded_matrix,\n                    original_matrix,\n                    reshuffle_buffer,\n                    reduce_buffer,\n   
                 size_before_expansion,\n                    total_before_expansion,\n                    size_after_expansion,\n                    total_after_expansion,\n                    alpha,\n                    beta,\n                    step);\n#endif\n    }\n\n    PE(multiply_other);\n    // after the memory is freed, the buffer sizes are back to the previous\n    // values (the values at the beginning of this parallel step)\n    expanded_mat.set_sizes(\n        newP, size_before_expansion, newP.first() - P.first());\n    PL();\n}\n\nusing zfloat_t = std::complex<float>;\nusing zdouble_t = std::complex<double>;\n\n// explicit instantiation for multiply_using_layout without context\ntemplate void multiply_using_layout<double>(costa::grid_layout<double> &A,\n                                            costa::grid_layout<double> &B,\n                                            costa::grid_layout<double> &C,\n                                            double alpha,\n                                            double beta,\n                                            char transa,\n                                            char transb,\n                                            MPI_Comm comm);\n\ntemplate void multiply_using_layout<float>(costa::grid_layout<float> &A,\n                                           costa::grid_layout<float> &B,\n                                           costa::grid_layout<float> &C,\n                                           float alpha,\n                                           float beta,\n                                           char transa,\n                                           char transb,\n                                           MPI_Comm comm);\n\ntemplate void\nmultiply_using_layout<zdouble_t>(costa::grid_layout<zdouble_t> &A,\n                                 costa::grid_layout<zdouble_t> &B,\n                                 costa::grid_layout<zdouble_t> &C,\n                                 zdouble_t 
alpha,\n                                 zdouble_t beta,\n                                 char transa,\n                                 char transb,\n                                 MPI_Comm comm);\n\ntemplate void\nmultiply_using_layout<zfloat_t>(costa::grid_layout<zfloat_t> &A,\n                                costa::grid_layout<zfloat_t> &B,\n                                costa::grid_layout<zfloat_t> &C,\n                                zfloat_t alpha,\n                                zfloat_t beta,\n                                char transa,\n                                char transb,\n                                MPI_Comm comm);\n\n// explicit instantiation for multiply_using_layout with context\ntemplate void multiply_using_layout<double>(cosma_context<double> *ctx,\n                                            costa::grid_layout<double> &A,\n                                            costa::grid_layout<double> &B,\n                                            costa::grid_layout<double> &C,\n                                            double alpha,\n                                            double beta,\n                                            char transa,\n                                            char transb,\n                                            MPI_Comm comm);\n\ntemplate void multiply_using_layout<float>(cosma_context<float> *ctx,\n                                           costa::grid_layout<float> &A,\n                                           costa::grid_layout<float> &B,\n                                           costa::grid_layout<float> &C,\n                                           float alpha,\n                                           float beta,\n                                           char transa,\n                                           char transb,\n                                           MPI_Comm comm);\n\ntemplate void\nmultiply_using_layout<zdouble_t>(cosma_context<zdouble_t> *ctx,\n                  
               costa::grid_layout<zdouble_t> &A,\n                                 costa::grid_layout<zdouble_t> &B,\n                                 costa::grid_layout<zdouble_t> &C,\n                                 zdouble_t alpha,\n                                 zdouble_t beta,\n                                 char transa,\n                                 char transb,\n                                 MPI_Comm comm);\n\ntemplate void\nmultiply_using_layout<zfloat_t>(cosma_context<zfloat_t> *ctx,\n                                costa::grid_layout<zfloat_t> &A,\n                                costa::grid_layout<zfloat_t> &B,\n                                costa::grid_layout<zfloat_t> &C,\n                                zfloat_t alpha,\n                                zfloat_t beta,\n                                char transa,\n                                char transb,\n                                MPI_Comm comm);\n\n// Explicit instantiations for short `multiply`\ntemplate void multiply<double>(cosma_context<double> *ctx,\n                               CosmaMatrix<double> &A,\n                               CosmaMatrix<double> &B,\n                               CosmaMatrix<double> &C,\n                               const Strategy &strategy,\n                               MPI_Comm comm,\n                               double alpha,\n                               double beta);\n\ntemplate void multiply<float>(cosma_context<float> *ctx,\n                              CosmaMatrix<float> &A,\n                              CosmaMatrix<float> &B,\n                              CosmaMatrix<float> &C,\n                              const Strategy &strategy,\n                              MPI_Comm comm,\n                              float alpha,\n                              float beta);\n\ntemplate void multiply<zdouble_t>(cosma_context<zdouble_t> *ctx,\n                                  CosmaMatrix<zdouble_t> &A,\n                                  
CosmaMatrix<zdouble_t> &B,\n                                  CosmaMatrix<zdouble_t> &C,\n                                  const Strategy &strategy,\n                                  MPI_Comm comm,\n                                  zdouble_t alpha,\n                                  zdouble_t beta);\n\ntemplate void multiply<zfloat_t>(cosma_context<zfloat_t> *ctx,\n                                 CosmaMatrix<zfloat_t> &A,\n                                 CosmaMatrix<zfloat_t> &B,\n                                 CosmaMatrix<zfloat_t> &C,\n                                 const Strategy &strategy,\n                                 MPI_Comm comm,\n                                 zfloat_t alpha,\n                                 zfloat_t beta);\n\n// Explicit instantiations for short `multiply` without the context\n//\ntemplate void multiply<double>(CosmaMatrix<double> &A,\n                               CosmaMatrix<double> &B,\n                               CosmaMatrix<double> &C,\n                               const Strategy &strategy,\n                               MPI_Comm comm,\n                               double alpha,\n                               double beta);\n\ntemplate void multiply<float>(CosmaMatrix<float> &A,\n                              CosmaMatrix<float> &B,\n                              CosmaMatrix<float> &C,\n                              const Strategy &strategy,\n                              MPI_Comm comm,\n                              float alpha,\n                              float beta);\n\ntemplate void multiply<zdouble_t>(CosmaMatrix<zdouble_t> &A,\n                                  CosmaMatrix<zdouble_t> &B,\n                                  CosmaMatrix<zdouble_t> &C,\n                                  const Strategy &strategy,\n                                  MPI_Comm comm,\n                                  zdouble_t alpha,\n                                  zdouble_t beta);\n\ntemplate void 
multiply<zfloat_t>(CosmaMatrix<zfloat_t> &A,\n                                 CosmaMatrix<zfloat_t> &B,\n                                 CosmaMatrix<zfloat_t> &C,\n                                 const Strategy &strategy,\n                                 MPI_Comm comm,\n                                 zfloat_t alpha,\n                                 zfloat_t beta);\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/multiply.hpp",
    "content": "#pragma once\n\n#include <cosma/communicator.hpp>\n#include <cosma/context.hpp>\n#include <cosma/interval.hpp>\n#include <cosma/matrix.hpp>\n#include <cosma/strategy.hpp>\n\n#include <mpi.h>\n\n#include <costa/grid2grid/transform.hpp>\n\nnamespace cosma {\n\n/*\n * Performs matrix multiplication: C = alpha*op(A)*op(B) + beta*C,\n * where alpha and beta are scalars and op can be:\n * no-transpose ('N'), transpose ('T') or transpose and conjugate ('C')\n */\n\n/*\n * Takes matrices given in an arbitrary grid-like data layouts\n * COSTA represents the abstract representation of the target layout\n * where target layout is the initial data layout for matrices A and B\n * and the final data layout for matrix C.\n * this function will perform the transformations between the target\n * layouts and the optimal COSMA layout and perform the multiplication.\n * it is not as efficient as using the native COSMA layout,\n * but is very general as it can work with any grid-like data layout.\n */\ntemplate <typename Scalar>\nvoid multiply_using_layout(costa::grid_layout<Scalar> &A_layout,\n                           costa::grid_layout<Scalar> &B_layout,\n                           costa::grid_layout<Scalar> &C_layout,\n                           Scalar alpha,\n                           Scalar beta,\n                           char transa,\n                           char transb,\n                           MPI_Comm comm);\n\n/*\n * Takes matrices in the optimal COSMA layout and the division strategy\n * and performs the multiplication. It is very efficient as it uses the\n * optimal COSMA layout.\n */\n\ntemplate <typename Scalar>\nvoid multiply(CosmaMatrix<Scalar> &A,\n              CosmaMatrix<Scalar> &B,\n              CosmaMatrix<Scalar> &C,\n              const Strategy &strategy,\n              MPI_Comm comm,\n              Scalar alpha,\n              Scalar beta);\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/one_sided_communicator.cpp",
    "content": "#include <cosma/one_sided_communicator.hpp>\n\n#include <cosma/local_multiply.hpp>\n#include <cosma/math_utils.hpp>\n#include <cosma/mpi_mapper.hpp>\n#include <cosma/profiler.hpp>\n\n#include <algorithm>\n#include <atomic>\n#include <chrono>\n#include <complex>\n#include <condition_variable>\n#include <cstring>\n#include <future>\n#include <iostream>\n#include <mutex>\n#include <stdlib.h>\n#include <thread>\n#include <tuple>\n#include <vector>\n\nnamespace cosma {\n\nnamespace one_sided_communicator {\n\ntemplate <typename Scalar>\nMPI_Win\ncreate_window(MPI_Comm comm, Scalar *pointer, size_t size, bool no_locks) {\n    MPI_Info info;\n    MPI_Info_create(&info);\n    if (no_locks) {\n        MPI_Info_set(info, \"no_locks\", \"true\");\n    } else {\n        MPI_Info_set(info, \"no_locks\", \"false\");\n    }\n    MPI_Info_set(info, \"accumulate_ops\", \"same_op\");\n    MPI_Info_set(info, \"accumulate_ordering\", \"none\");\n\n    MPI_Win win;\n    MPI_Win_create(\n        pointer, size * sizeof(Scalar), sizeof(Scalar), info, comm, &win);\n\n    MPI_Info_free(&info);\n\n    return win;\n}\n\ntemplate <typename Scalar>\nvoid copy(MPI_Comm comm,\n          int rank,\n          int div,\n          Interval &P,\n          Scalar *in,\n          Scalar *out,\n          Scalar *reshuffle_buffer,\n          std::vector<std::vector<int>> &size_before,\n          std::vector<int> &total_before,\n          int total_after) {\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n\n    int relative_rank = rank - P.first();\n    int local_size = total_before[relative_rank];\n\n    MPI_Win win = create_window(comm, in, local_size, true);\n    MPI_Win_fence(MPI_MODE_NOPRECEDE + MPI_MODE_NOPUT, win);\n\n    int n_blocks = size_before[relative_rank].size();\n    std::vector<int> rank_offset(div);\n\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    int displacement = 0;\n    for (int block = 0; block < n_blocks; block++) {\n        
for (int rank = 0; rank < div; ++rank) {\n            int target = P.locate_in_interval(div, rank, off);\n            int b_size = size_before[target][block];\n\n            MPI_Get(out + displacement,\n                    b_size,\n                    mpi_type,\n                    rank,\n                    rank_offset[rank],\n                    b_size,\n                    mpi_type,\n                    win);\n\n            rank_offset[rank] += b_size;\n            displacement += b_size;\n        }\n    }\n\n    MPI_Win_fence(MPI_MODE_NOSUCCEED, win);\n    MPI_Win_free(&win);\n\n#ifdef DEBUG\n    std::cout << \"Content of the copied matrix in rank \" << rank\n              << \" is now: \" << std::endl;\n    for (int j = 0; j < rank_offset[gp]; j++) {\n        std::cout << out[j] << \", \";\n    }\n    std::cout << std::endl;\n#endif\n}\n\ntemplate <typename Scalar>\nvoid reduce(MPI_Comm comm,\n            int rank,\n            int div,\n            Interval &P,\n            Scalar *in,\n            Scalar *out,\n            Scalar *reshuffle_buffer,\n            Scalar *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            Scalar beta) {\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    // int div = strategy_->divisor(step);\n    // int gp, off;\n    // std::tie(gp, off) = group_and_offset(P, div);\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n\n    int n_blocks = c_expanded[off].size();\n\n    int target = P.locate_in_interval(div, gp, off);\n    int local_size = c_total_current[target];\n\n    // initilize C to 0 if beta = 0 since accumulate will do additions over\n    // this array\n    if (beta == Scalar{0}) {\n        memset(out, 0, local_size * sizeof(Scalar));\n    }\n\n    MPI_Win win = create_window(comm, out, local_size, true);\n  
  MPI_Win_fence(MPI_MODE_NOPRECEDE + MPI_MODE_NOSTORE, win);\n\n    int displacement = 0;\n    std::vector<int> rank_offset(div);\n    // go through the communication ring\n    for (int block = 0; block < n_blocks; ++block) {\n        for (int i = 0; i < div; ++i) {\n            int target = P.locate_in_interval(div, i, off);\n            int b_size = c_current[target][block];\n\n            MPI_Accumulate(in + displacement,\n                           b_size,\n                           mpi_type,\n                           i,\n                           rank_offset[i],\n                           b_size,\n                           mpi_type,\n                           MPI_SUM,\n                           win);\n\n            displacement += b_size;\n            rank_offset[i] += b_size;\n        }\n    }\n\n    MPI_Win_fence(MPI_MODE_NOSUCCEED, win);\n    MPI_Win_free(&win);\n}\n\ntemplate <typename Scalar>\nvoid comm_task_mn_split_polling(int divisor,\n                                int gp,\n                                Scalar *original_matrix,\n                                Scalar *expanded_matrix,\n                                Interval m,\n                                Interval k,\n                                std::vector<int> &displacements,\n                                std::atomic_int &ready,\n                                MPI_Comm comm) {\n    PE(multiply_communication_other);\n    // copy the matrix that wasn't divided in this step\n    int local_size = m.length() * k.subinterval(divisor, gp).length();\n\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    MPI_Win win = create_window(comm, original_matrix, local_size, false);\n    // MPI_Comm mpi_comm = comm.active_comm(step);\n    MPI_Win_lock_all(MPI_MODE_NOCHECK, win);\n\n    int dist = 1;\n    while (dist < divisor) {\n        int rank = (gp + dist) % divisor;\n        int b_size = m.length() * k.subinterval(divisor, rank).length();\n\n        PL();\n        
PE(multiply_communication_copy);\n        MPI_Request req;\n        MPI_Rget(expanded_matrix + m.length() * displacements[rank],\n                 b_size,\n                 mpi_type,\n                 rank,\n                 0,\n                 b_size,\n                 mpi_type,\n                 win,\n                 &req);\n\n        int finished = false;\n        while (!finished) {\n            MPI_Test(&req, &finished, MPI_STATUS_IGNORE);\n            if (!finished) {\n                std::this_thread::yield();\n            } else {\n                ready++;\n            }\n        }\n        PL();\n\n        PE(multiply_communication_other);\n        dist++;\n    }\n\n    MPI_Win_unlock_all(win);\n    MPI_Win_free(&win);\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid comm_task_mn_split_busy_waiting(int divisor,\n                                     int gp,\n                                     Scalar *original_matrix,\n                                     Scalar *expanded_matrix,\n                                     Interval m,\n                                     Interval k,\n                                     std::vector<int> &displacements,\n                                     std::atomic_int &ready,\n                                     MPI_Comm comm) {\n    // copy the matrix that wasn't divided in this step\n    PE(multiply_communication_other);\n    int local_size = m.length() * k.subinterval(divisor, gp).length();\n\n    MPI_Win win = create_window(comm, original_matrix, local_size, false);\n\n#ifdef DEBUG\n    std::cout << \"window content: \" << std::endl;\n    for (int i = 0; i < local_size; ++i) {\n        std::cout << *(original_matrix + i) << \", \";\n    }\n    std::cout << std::endl;\n#endif\n\n    // MPI_Comm mpi_comm = comm.active_comm(step);\n    // MPI_Win_lock_all(MPI_MODE_NOCHECK, win);\n    MPI_Win_lock_all(MPI_MODE_NOCHECK, win);\n\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    int dist = 1;\n    while (dist < 
divisor) {\n        int rank = (gp + dist) % divisor;\n        int b_size = m.length() * k.subinterval(divisor, rank).length();\n        PL();\n#ifdef DEBUG\n        std::cout << \"Getting a piece from rank \" << rank << std::endl;\n#endif\n        PE(multiply_communication_copy);\n        MPI_Get(expanded_matrix + m.length() * displacements[rank],\n                b_size,\n                mpi_type,\n                rank,\n                0,\n                b_size,\n                mpi_type,\n                win);\n\n        // flush completes the operation locally\n        // but since this is a Get operation,\n        // then it also means that after flush\n        // it will also be completed remotely\n        MPI_Win_flush_local(rank, win);\n        PL();\n\n        PE(multiply_communication_other);\n        dist++;\n        ready++;\n    }\n\n    MPI_Win_unlock_all(win);\n    MPI_Win_free(&win);\n    PL();\n}\n\n/* OVERLAP OF M SPLIT WITH OPENMP\n * template<typename Scalar>\n    void overlap_m_split(context& ctx, MPI_Comm comm, int rank, int\ndivisor, CosmaMatrix& matrixA, CosmaMatrix& matrixB, CosmaMatrix& matrixC,\n            Interval& m, Interval& n, Interval& k, Interval& P, Scalar beta)\n{ PE(multiply_communication_copy);\n\n        int gp, off;\n        std::tie(gp, off) = P.locate_in_subinterval(divisor, rank);\n\n        CosmaMatrix& expanded_mat = matrixB;\n        int buffer_idx = expanded_mat.buffer_index();\n        expanded_mat.advance_buffer();\n\n        Scalar* original_matrix = expanded_mat.current_matrix();\n        Scalar* expanded_matrix = expanded_mat.buffer_ptr();\n\n        // interval of m that this rank owns from this step on\n        Interval newm = m.subinterval(divisor, gp);\n\n        // copy the matrix that wasn't divided in this step\n        int local_size = k.length() * n.subinterval(divisor, gp).length();\n\n        // offsets in the expanded matrix for each rank\n        std::vector<int> displacements_n(divisor);\n        
int disp = 0;\n\n        for (int rank = 0; rank < divisor; ++rank) {\n            displacements_n[rank] = disp;\n            disp += n.subinterval(divisor, rank).length();\n        }\n        // b: k * disp\n        // c: newm * disp\n\n        Scalar* prev_a = matrixA.current_matrix();\n        Scalar* prev_b = expanded_matrix;\n        Scalar* prev_c = matrixC.current_matrix();\n\n        MPI_Win win = create_window(comm, original_matrix, local_size,\nfalse);\n\n        auto mpi_type = mpi_mapper<Scalar>::getType();\n\n#ifdef DEBUG\n        std::cout << \"window content: \" << std::endl;\n        for (int i = 0; i < local_size; ++i) {\n            std::cout << *(original_matrix + i) << \", \";\n        }\n        std::cout << std::endl;\n#endif\n\n        // MPI_Comm mpi_comm = comm.active_comm(step);\n        // MPI_Win_lock_all(MPI_MODE_NOCHECK, win);\n        MPI_Win_lock_all(MPI_MODE_NOCHECK, win);\n\n#pragma omp parallel num_threads(2)\n        {\n#pragma omp single nowait\n#pragma omp critical\n        {\n            // compute the piece that is already owned\n            Scalar* pointer_b = original_matrix;\n            Scalar* pointer_c = prev_c + newm.length() *\ndisplacements_n[gp];\n\n            matrixB.set_current_matrix(pointer_b);\n            matrixC.set_current_matrix(pointer_c);\n\n            PL();\n            local_multiply(ctx, matrixA.current_matrix(),\nmatrixB.current_matrix(), matrixC.current_matrix(), newm.length(),\n                    n.subinterval(divisor, gp).length(), k.length(), beta);\n            PE(multiply_communication_copy);\n        }\n\n#pragma omp single nowait\n        for (int dist = 1; dist < divisor; dist++) {\n            int rank = (gp+dist)%divisor;\n            int b_size = k.length() * n.subinterval(divisor, rank).length();\n\n            MPI_Get(expanded_matrix + k.length() * displacements_n[rank],\nb_size, mpi_type, rank, 0, b_size, mpi_type, win);\n\n            // flush completes the operation locally\n       
     // but since this is a Get operation,\n            // then it also means that after flush\n            // it will also be completed remotely\n            MPI_Win_flush_local(rank, win);\n\n#pragma omp task firstprivate(dist, divisor)\n#pragma omp critical\n                {\n                    // Compute the piece that has arrived\n                    Scalar* pointer_b = expanded_matrix + k.length() *\ndisplacements_n[rank]; Scalar* pointer_c = prev_c + newm.length() *\ndisplacements_n[rank];\n\n                    matrixB.set_current_matrix(pointer_b);\n                    matrixC.set_current_matrix(pointer_c);\n\n                    PL();\n                    local_multiply_cpu(matrixA.current_matrix(),\nmatrixB.current_matrix(), matrixC.current_matrix(), newm.length(),\n                            n.subinterval(divisor, rank).length(),\nk.length(), beta);\n                    // local_multiply(ctx, matrixA.current_matrix(),\nmatrixB.current_matrix(),\n                    //         matrixC.current_matrix(), newm.length(),\n                    //         n.subinterval(divisor, rank).length(),\nk.length(), beta); PE(multiply_communication_copy);\n                }\n            }\n#pragma omp taskwait\n        }\n\n        MPI_Win_unlock_all(win);\n        MPI_Win_free(&win);\n\n        expanded_mat.set_current_matrix(original_matrix);\n        expanded_mat.set_buffer_index(buffer_idx);\n        matrixC.set_current_matrix(prev_c);\n\n        PL();\n    }\n*/\n// ***********************************\n//           DIVISION BY M\n// ***********************************\ntemplate <typename Scalar>\nvoid overlap_m_split(bool use_busy_waiting,\n                     cosma_context<Scalar> *ctx,\n                     MPI_Comm comm,\n                     int rank,\n                     int divisor,\n                     CosmaMatrix<Scalar> &matrixA,\n                     CosmaMatrix<Scalar> &matrixB,\n                     CosmaMatrix<Scalar> &matrixC,\n                   
  Interval &m,\n                     Interval &n,\n                     Interval &k,\n                     Interval &P,\n                     Scalar alpha,\n                     Scalar beta) {\n    PE(multiply_communication_other);\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(divisor, rank);\n\n    CosmaMatrix<Scalar> &expanded_mat = matrixB;\n    int buffer_idx = expanded_mat.buffer_index();\n    expanded_mat.advance_buffer();\n\n    Scalar *original_matrix = expanded_mat.current_matrix();\n    Scalar *expanded_matrix = expanded_mat.buffer_ptr();\n\n    // interval of m that this rank owns from this step on\n    Interval newm = m.subinterval(divisor, gp);\n\n    // copy the matrix that wasn't divided in this step\n    int local_size = k.length() * n.subinterval(divisor, gp).length();\n\n    // offsets in the expanded matrix for each rank\n    std::vector<int> displacements_n(divisor);\n    int disp = 0;\n\n    for (int rank = 0; rank < divisor; ++rank) {\n        displacements_n[rank] = disp;\n        disp += n.subinterval(divisor, rank).length();\n    }\n    // b: k * disp\n    // c: newm * disp\n\n    std::atomic_int ready(0);\n    std::thread comm_thread(use_busy_waiting\n                                ? 
comm_task_mn_split_busy_waiting<Scalar>\n                                : comm_task_mn_split_polling<Scalar>,\n                            divisor,\n                            gp,\n                            original_matrix,\n                            expanded_matrix,\n                            k,\n                            n,\n                            std::ref(displacements_n),\n                            std::ref(ready),\n                            comm);\n\n    Scalar *prev_a = matrixA.current_matrix();\n    Scalar *prev_b = expanded_matrix;\n    Scalar *prev_c = matrixC.current_matrix();\n\n    // compute the piece that is already owned\n    Scalar *pointer_b = original_matrix;\n    Scalar *pointer_c = prev_c + newm.length() * displacements_n[gp];\n\n    matrixB.set_current_matrix(pointer_b);\n    matrixC.set_current_matrix(pointer_c);\n\n    PL();\n    bool copy_c_back = true;\n    local_multiply(ctx,\n                   matrixA.current_matrix(),\n                   matrixB.current_matrix(),\n                   matrixC.current_matrix(),\n                   newm.length(),\n                   n.subinterval(divisor, gp).length(),\n                   k.length(),\n                   alpha,\n                   beta,\n                   copy_c_back);\n    PE(multiply_communication_other);\n\n    int dist = 1;\n    while (dist < divisor) {\n        while (ready > 0) {\n            int idx = (gp + dist) % divisor;\n\n            // Compute the piece that has arrived\n            Scalar *pointer_b =\n                expanded_matrix + k.length() * displacements_n[idx];\n            Scalar *pointer_c = prev_c + newm.length() * displacements_n[idx];\n\n            matrixB.set_current_matrix(pointer_b);\n            matrixC.set_current_matrix(pointer_c);\n\n            PL();\n            local_multiply(ctx,\n                           matrixA.current_matrix(),\n                           matrixB.current_matrix(),\n                           
matrixC.current_matrix(),\n                           newm.length(),\n                           n.subinterval(divisor, idx).length(),\n                           k.length(),\n                           alpha,\n                           beta,\n                           copy_c_back);\n            PE(multiply_communication_copy);\n            ready--;\n            dist++;\n        }\n    }\n\n    expanded_mat.set_current_matrix(original_matrix);\n    expanded_mat.set_buffer_index(buffer_idx);\n    matrixC.set_current_matrix(prev_c);\n\n    comm_thread.join();\n    PL();\n}\n\n// ***********************************\n//           DIVISION BY N\n// ***********************************\ntemplate <typename Scalar>\nvoid overlap_n_split(bool use_busy_waiting,\n                     cosma_context<Scalar> *ctx,\n                     MPI_Comm comm,\n                     int rank,\n                     int divisor,\n                     CosmaMatrix<Scalar> &matrixA,\n                     CosmaMatrix<Scalar> &matrixB,\n                     CosmaMatrix<Scalar> &matrixC,\n                     Interval &m,\n                     Interval &n,\n                     Interval &k,\n                     Interval &P,\n                     Scalar alpha,\n                     Scalar beta) {\n    PE(multiply_communication_other);\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(divisor, rank);\n\n    CosmaMatrix<Scalar> &expanded_mat = matrixA;\n\n    int buffer_idx = expanded_mat.buffer_index();\n    expanded_mat.advance_buffer();\n\n    Scalar *original_matrix = expanded_mat.current_matrix();\n    Scalar *expanded_matrix = expanded_mat.buffer_ptr();\n    // expanded_mat.set_current_matrix(expanded_matrix);\n\n    Scalar *prev_a = expanded_matrix;\n    Scalar *prev_b = matrixB.current_matrix();\n\n    Interval newn = n.subinterval(divisor, gp);\n\n    int local_size = m.length() * k.subinterval(divisor, gp).length();\n\n    // offsets in the expanded matrix for each rank\n 
   std::vector<int> displacements_k(divisor);\n    int disp_k = 0;\n    for (int rank = 0; rank < divisor; ++rank) {\n        displacements_k[rank] = disp_k;\n        disp_k += k.subinterval(divisor, rank).length();\n    }\n    // a: m * disp\n\n    // memory enough for the largest block\n    // used to overlap communication and computation\n    std::vector<Scalar> block_buffer(\n        newn.length() * math_utils::int_div_up(k.length(), divisor));\n    // std::cout << \"block buffer size = \" << block_buffer.size() <<\n    // std::endl;\n\n    std::atomic_int ready(1);\n    std::thread comm_thread(use_busy_waiting\n                                ? comm_task_mn_split_busy_waiting<Scalar>\n                                : comm_task_mn_split_polling<Scalar>,\n                            divisor,\n                            gp,\n                            original_matrix,\n                            expanded_matrix,\n                            m,\n                            k,\n                            std::ref(displacements_k),\n                            std::ref(ready),\n                            comm);\n\n    int dist = 0;\n    while (dist < divisor) {\n        while (ready > 0) {\n            int idx = (gp + dist) % divisor;\n\n            // Compute the piece that has arrived\n            Scalar *pointer_a =\n                dist == 0\n                    ? original_matrix\n                    : (expanded_matrix + m.length() * displacements_k[idx]);\n            // Scalar* pointer_b = switch_buffers ? 
buffer2.data() :\n            // buffer1.data();\n            Scalar *pointer_b = block_buffer.data();\n\n            for (int col = 0; col < newn.length(); ++col) {\n                int column_size = k.subinterval(divisor, idx).length();\n                int start = displacements_k[idx] + k.length() * col;\n                std::memcpy(pointer_b + col * column_size,\n                            prev_b + start,\n                            column_size * sizeof(Scalar));\n            }\n\n            matrixA.set_current_matrix(pointer_a);\n            matrixB.set_current_matrix(pointer_b);\n\n            Scalar new_beta = dist == 0 ? beta : Scalar(1);\n            PL();\n            bool copy_c_back = true;\n            local_multiply(ctx,\n                           matrixA.current_matrix(),\n                           matrixB.current_matrix(),\n                           matrixC.current_matrix(),\n                           m.length(),\n                           newn.length(),\n                           k.subinterval(divisor, idx).length(),\n                           alpha,\n                           new_beta,\n                           copy_c_back);\n            PE(multiply_communication_other);\n\n            dist++;\n            ready--;\n        }\n    }\n    comm_thread.join();\n\n    // revert the current matrix\n    expanded_mat.set_buffer_index(buffer_idx);\n    expanded_mat.set_current_matrix(original_matrix);\n    matrixB.set_current_matrix(prev_b);\n\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid comm_task_k_split(int divisor,\n                       int gp,\n                       int off,\n                       int jump_size,\n                       Scalar *expanded_matrix,\n                       Scalar *recv_buffer,\n                       Interval m,\n                       Interval n,\n                       Interval P,\n                       std::vector<int> &displacements,\n                       int &ready,\n                       
std::mutex &mtx,\n                       std::condition_variable &cv,\n                       MPI_Comm comm) {\n    PE(multiply_communication_other);\n\n    int local_size = m.length() * n.subinterval(divisor, gp).length();\n    MPI_Win win = create_window(comm, recv_buffer, local_size, false);\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n\n    int packages = 0;\n    int i = 0;\n    while (packages < divisor) {\n        std::unique_lock<std::mutex> lk(mtx);\n        cv.wait(lk, [packages, divisor, jump_size, &ready]() {\n            int diff = ready - packages;\n            return diff >= jump_size || (divisor - packages < jump_size);\n        });\n\n        packages = ready;\n        lk.unlock();\n        packages = std::min(packages, divisor);\n\n        int diff = packages - i;\n        while (i < packages) {\n            int idx = (gp + i) % divisor;\n            Scalar *pointer_c =\n                expanded_matrix + m.length() * displacements[idx];\n            int b_size = m.length() * n.subinterval(divisor, idx).length();\n\n            PL();\n            PE(multiply_communication_reduce);\n            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, idx, 0, win);\n            MPI_Accumulate(pointer_c,\n                           b_size,\n                           mpi_type,\n                           idx,\n                           0,\n                           b_size,\n                           mpi_type,\n                           MPI_SUM,\n                           win);\n            MPI_Win_unlock(idx, win);\n            PL();\n            PE(multiply_communication_other);\n            i++;\n        }\n    }\n\n    MPI_Win_free(&win);\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid compute(cosma_context<Scalar> *ctx,\n             CosmaMatrix<Scalar> &A,\n             CosmaMatrix<Scalar> &B,\n             CosmaMatrix<Scalar> &C,\n             Scalar *pointer_b,\n             Scalar *pointer_c,\n             Interval &m,\n             Interval &n,\n       
      Interval &k,\n             std::vector<int> &displacements_n,\n             Scalar alpha,\n             Scalar beta,\n             int start,\n             int end) {\n    if (start >= end)\n        return;\n\n    int n_length = 0;\n    if (end >= displacements_n.size()) {\n        n_length = n.length() - displacements_n[start];\n    } else {\n        n_length = displacements_n[end] - displacements_n[start];\n    }\n\n    pointer_b += k.length() * displacements_n[start];\n    pointer_c += m.length() * displacements_n[start];\n    // Scalar* b = pointer_b + k.length() * displacements_n[i];\n    // Scalar* c = pointer_c + m.length() * displacements_n[i];\n\n    B.set_current_matrix(pointer_b);\n    C.set_current_matrix(pointer_c);\n    // B.set_current_matrix(b);\n    // C.set_current_matrix(c);\n\n    PL();\n    bool copy_c_back = true;\n    local_multiply(ctx,\n                   A.current_matrix(),\n                   B.current_matrix(),\n                   C.current_matrix(),\n                   m.length(),\n                   n_length,\n                   k.length(),\n                   alpha,\n                   beta,\n                   copy_c_back);\n    PE(multiply_communication_other);\n}\n\n// ***********************************\n//           DIVISION BY K\n// ***********************************\ntemplate <typename Scalar>\nvoid overlap_k_split(cosma_context<Scalar> *ctx,\n                     MPI_Comm comm,\n                     int rank,\n                     int divisor,\n                     CosmaMatrix<Scalar> &matrixA,\n                     CosmaMatrix<Scalar> &matrixB,\n                     CosmaMatrix<Scalar> &matrixC,\n                     Interval &m,\n                     Interval &n,\n                     Interval &k,\n                     Interval &P,\n                     Scalar alpha,\n                     Scalar beta) {\n    PE(multiply_communication_other);\n    // int divisor = strategy.divisor(step);\n    int gp, off;\n    
std::tie(gp, off) = P.locate_in_subinterval(divisor, rank);\n\n    CosmaMatrix<Scalar> &expanded_mat = matrixC;\n    int buffer_idx = expanded_mat.buffer_index();\n    expanded_mat.advance_buffer();\n\n    Scalar *original_matrix = expanded_mat.current_matrix();\n    Scalar *expanded_matrix = expanded_mat.buffer_ptr();\n\n    expanded_mat.set_buffer_index(buffer_idx);\n    expanded_mat.set_current_matrix(original_matrix);\n\n    int local_size = m.length() * n.subinterval(divisor, gp).length();\n\n    auto accumulate_buffer = \n        (beta != Scalar{0}) ? expanded_mat.reduce_buffer_ptr() : original_matrix;\n    std::fill(accumulate_buffer, accumulate_buffer + local_size, Scalar{0});\n\n    Interval newk = k.subinterval(divisor, gp);\n\n    std::vector<int> displacements_n(divisor);\n    int disp = 0;\n    for (int rank = 0; rank < divisor; ++rank) {\n        displacements_n[rank] = disp;\n        disp += n.subinterval(divisor, rank).length();\n    }\n    // c: m * displacements_n\n    // b: newk * displacements_n\n\n    // std::atomic_int ready(0);\n    int ready = 0;\n    std::mutex mtx;\n    std::condition_variable cv;\n\n    int comp_comm_ratio = 1;\n    int target_jump_size = std::min(comp_comm_ratio, divisor);\n\n    std::thread comm_task(comm_task_k_split<Scalar>,\n                          divisor,\n                          gp,\n                          off,\n                          target_jump_size,\n                          expanded_matrix,\n                          accumulate_buffer,\n                          m,\n                          n,\n                          P,\n                          std::ref(displacements_n),\n                          std::ref(ready),\n                          std::ref(mtx),\n                          std::ref(cv),\n                          comm);\n\n    Scalar *prev_a = matrixA.current_matrix();\n    Scalar *prev_b = matrixB.current_matrix();\n    Scalar *prev_c = expanded_matrix;\n\n    int remainder_packages 
= 0;\n\n    if (target_jump_size == divisor) {\n        compute(ctx,\n                matrixA,\n                matrixB,\n                matrixC,\n                prev_b,\n                prev_c,\n                m,\n                n,\n                newk,\n                std::ref(displacements_n),\n                alpha,\n                Scalar{0},\n                0,\n                divisor);\n\n        std::unique_lock<std::mutex> lk(mtx);\n        ready = divisor;\n        lk.unlock();\n        cv.notify_one();\n    } else {\n        int processed = 0;\n        int start = gp;\n        int end = gp;\n        while (processed < divisor) {\n            int jump_size = target_jump_size - remainder_packages;\n            remainder_packages = 0;\n            end = (start + jump_size) % divisor;\n\n            if (start < end) {\n                if (start < gp) {\n                    end = std::min(end, gp);\n                }\n\n                compute(ctx,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        prev_b,\n                        prev_c,\n                        m,\n                        n,\n                        newk,\n                        std::ref(displacements_n),\n                        alpha,\n                        Scalar{0},\n                        start,\n                        end);\n\n                processed += end - start;\n                std::unique_lock<std::mutex> lk(mtx);\n                ready += end - start;\n                lk.unlock();\n                cv.notify_one();\n\n                if (processed < divisor) {\n                    int next_end = end + 1;\n                    if (next_end <= divisor) {\n                        compute(ctx,\n                                matrixA,\n                                matrixB,\n                                matrixC,\n                                prev_b,\n                                
prev_c,\n                                m,\n                                n,\n                                newk,\n                                std::ref(displacements_n),\n                                alpha,\n                                Scalar{0},\n                                end,\n                                next_end);\n                        processed++;\n                        remainder_packages = 1;\n\n                        std::unique_lock<std::mutex> lk(mtx);\n                        ready++;\n                        lk.unlock();\n                        cv.notify_one();\n                    }\n                }\n            } else {\n                if (end >= gp) {\n                    end = std::min(end, gp);\n                }\n\n                compute(ctx,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        prev_b,\n                        prev_c,\n                        m,\n                        n,\n                        newk,\n                        std::ref(displacements_n),\n                        alpha,\n                        Scalar{0},\n                        start,\n                        divisor);\n                compute(ctx,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        prev_b,\n                        prev_c,\n                        m,\n                        n,\n                        newk,\n                        std::ref(displacements_n),\n                        alpha,\n                        Scalar{0},\n                        0,\n                        end);\n\n                processed += divisor - start + end;\n\n                std::unique_lock<std::mutex> lk(mtx);\n                ready += divisor - start + end;\n                lk.unlock();\n\n                cv.notify_one();\n\n                if (processed < divisor) {\n              
      int next_end = end + 1;\n                    if (next_end <= gp) {\n                        compute(ctx,\n                                matrixA,\n                                matrixB,\n                                matrixC,\n                                prev_b,\n                                prev_c,\n                                m,\n                                n,\n                                newk,\n                                std::ref(displacements_n),\n                                alpha,\n                                Scalar{0},\n                                end,\n                                next_end);\n                        processed++;\n                        remainder_packages = 1;\n\n                        std::unique_lock<std::mutex> lk(mtx);\n                        ready++;\n                        lk.unlock();\n                        cv.notify_one();\n                    }\n                }\n            }\n            start = (1 + end) % divisor; // t = (end + 1) % divisor;\n            // start = (end) % divisor;\n        }\n        if (remainder_packages > 0) {\n            cv.notify_one();\n        }\n    }\n\n    comm_task.join();\n\n    if (beta != Scalar{0}) {\n        for (unsigned i = 0u; i < local_size; ++i) {\n            original_matrix[i] = original_matrix[i] * beta + accumulate_buffer[i];\n        }\n    }\n\n    PL();\n}\n\ntemplate <typename Scalar>\nvoid overlap_comm_and_comp(cosma_context<Scalar> *ctx,\n                           MPI_Comm comm,\n                           int rank,\n                           const Strategy strategy,\n                           CosmaMatrix<Scalar> &matrixA,\n                           CosmaMatrix<Scalar> &matrixB,\n                           CosmaMatrix<Scalar> &matrixC,\n                           Interval &m,\n                           Interval &n,\n                           Interval &k,\n                           Interval &P,\n                        
   size_t step,\n                           Scalar alpha,\n                           Scalar beta) {\n    bool use_busy_waiting = strategy.use_busy_waiting;\n    int divisor = strategy.divisor(step);\n    if (strategy.split_m(step)) {\n        overlap_m_split(use_busy_waiting,\n                        ctx,\n                        comm,\n                        rank,\n                        divisor,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        m,\n                        n,\n                        k,\n                        P,\n                        alpha,\n                        beta);\n    } else if (strategy.split_n(step)) {\n        overlap_n_split(use_busy_waiting,\n                        ctx,\n                        comm,\n                        rank,\n                        divisor,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        m,\n                        n,\n                        k,\n                        P,\n                        alpha,\n                        beta);\n    } else {\n        overlap_k_split(ctx,\n                        comm,\n                        rank,\n                        divisor,\n                        matrixA,\n                        matrixB,\n                        matrixC,\n                        m,\n                        n,\n                        k,\n                        P,\n                        alpha,\n                        beta);\n    }\n}\n\ntemplate void overlap_comm_and_comp<float>(cosma_context<float> *ctx,\n                                           MPI_Comm comm,\n                                           int rank,\n                                           const Strategy strategy,\n                                           CosmaMatrix<float> &matrixA,\n                                           CosmaMatrix<float> 
&matrixB,\n                                           CosmaMatrix<float> &matrixC,\n                                           Interval &m,\n                                           Interval &n,\n                                           Interval &k,\n                                           Interval &P,\n                                           size_t step,\n                                           float alpha,\n                                           float beta);\n\ntemplate void overlap_comm_and_comp<double>(cosma_context<double> *ctx,\n                                            MPI_Comm comm,\n                                            int rank,\n                                            const Strategy strategy,\n                                            CosmaMatrix<double> &matrixA,\n                                            CosmaMatrix<double> &matrixB,\n                                            CosmaMatrix<double> &matrixC,\n                                            Interval &m,\n                                            Interval &n,\n                                            Interval &k,\n                                            Interval &P,\n                                            size_t step,\n                                            double alpha,\n                                            double beta);\n\ntemplate void overlap_comm_and_comp<std::complex<float>>(\n    cosma_context<std::complex<float>> *ctx,\n    MPI_Comm comm,\n    int rank,\n    const Strategy strategy,\n    CosmaMatrix<std::complex<float>> &matrixA,\n    CosmaMatrix<std::complex<float>> &matrixB,\n    CosmaMatrix<std::complex<float>> &matrixC,\n    Interval &m,\n    Interval &n,\n    Interval &k,\n    Interval &P,\n    size_t step,\n    std::complex<float> alpha,\n    std::complex<float> beta);\n\ntemplate void overlap_comm_and_comp<std::complex<double>>(\n    cosma_context<std::complex<double>> *ctx,\n    MPI_Comm comm,\n    int rank,\n    const 
Strategy strategy,\n    CosmaMatrix<std::complex<double>> &matrixA,\n    CosmaMatrix<std::complex<double>> &matrixB,\n    CosmaMatrix<std::complex<double>> &matrixC,\n    Interval &m,\n    Interval &n,\n    Interval &k,\n    Interval &P,\n    size_t step,\n    std::complex<double> alpha,\n    std::complex<double> beta);\n\n} // end namespace one_sided_communicator\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/one_sided_communicator.hpp",
    "content": "#pragma once\n#include <cosma/context.hpp>\n#include <cosma/interval.hpp>\n#include <cosma/matrix.hpp>\n#include <cosma/strategy.hpp>\n\n#include <mpi.h>\n\nnamespace cosma {\n\nnamespace one_sided_communicator {\n\ntemplate <typename Scalar>\nvoid overlap_comm_and_comp(cosma_context<Scalar> *ctx,\n                           MPI_Comm comm,\n                           int rank,\n                           const Strategy strategy,\n                           CosmaMatrix<Scalar> &matrixA,\n                           CosmaMatrix<Scalar> &matrixB,\n                           CosmaMatrix<Scalar> &matrixC,\n                           Interval &m,\n                           Interval &n,\n                           Interval &k,\n                           Interval &P,\n                           size_t step,\n                           Scalar alpha,\n                           Scalar beta);\n\n}; // namespace one_sided_communicator\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/pinned_buffers.cpp",
    "content": "#include <complex>\n#include <cosma/pinned_buffers.hpp>\n\n// container of pinned buffers\ntemplate <typename T>\nvoid pinned_buffers<T>::add(T* ptr, std::size_t size) {\n    auto elem_iter = list.find(ptr);\n    // if already pinned\n    if (elem_iter != list.end()) {\n        // check if the requested size is > pinned size\n        // and in that case unpin the ptr\n        if (size > elem_iter->second) {\n            // unpin\n            auto status = gpu::runtime_api::host_unregister((void*) ptr);\n            gpu::check_runtime_status(status);\n\n            // pin with the new size\n            status = gpu::runtime_api::host_register(\n                    (void*) ptr,\n                    size * sizeof(T),\n                    gpu::runtime_api::flag::HostRegisterDefault);\n            gpu::check_runtime_status(status);\n            elem_iter->second = size;\n        }\n    } else {\n        // if not pinned previously\n        // pin the buffer\n        auto status = gpu::runtime_api::host_register(\n                (void*) ptr,\n                size * sizeof(T),\n                gpu::runtime_api::flag::HostRegisterDefault);\n        gpu::check_runtime_status(status);\n        list.emplace(ptr, size);\n    }\n}\n\ntemplate <typename T>\nvoid pinned_buffers<T>::clear() {\n    for (auto& elem : list) {\n        // unpin the buffer\n        auto status = gpu::runtime_api::host_unregister((void*) elem.first);\n        gpu::check_runtime_status(status);\n    }\n    list.clear();\n}\n\n// template instantiation for pinned_buffers\ntemplate struct pinned_buffers<float>;\ntemplate struct pinned_buffers<double>;\ntemplate struct pinned_buffers<std::complex<float>>;\ntemplate struct pinned_buffers<std::complex<double>>;\n"
  },
  {
    "path": "src/cosma/pinned_buffers.hpp",
    "content": "#ifdef COSMA_HAVE_GPU\n#pragma once\n#include <unordered_map>\n#include <complex>\n\n#include <Tiled-MM/util.hpp>\n\n// container of pinned buffers\ntemplate <typename T>\nstruct pinned_buffers {\n    std::unordered_map<T*, std::size_t> list;\n\n    void add(T* ptr, std::size_t size);\n\n    void clear();\n};\n#endif\n"
  },
  {
    "path": "src/cosma/prefixed_pxgemm.cpp",
    "content": "#include <cosma/cosma_pxgemm.hpp>\n\nextern \"C\" {\n#include <cosma/prefixed_pxgemm.h>\n\n// scalapack routines that will be invoked if problem too small for COSMA\nvoid psgemm_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid pdgemm_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid pcgemm_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid pzgemm_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\n// Reimplement ScaLAPACK signatures functions\nvoid cosma_pdgemm_(const char* trans_a,\n            const char* trans_b,\n            const int* m,\n            const int* n,\n            const int* k,\n            const double* alpha,\n            const double *a,\n            const int* ia,\n            const int* ja,\n            const int* desca,\n            const double *b,\n            
const int* ib,\n            const int* jb,\n            const int *descb,\n            const double* beta,\n            double *c,\n            const int* ic,\n            const int* jc,\n            const int* descc) {\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        pdgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n    cosma::pxgemm<double>(*trans_a,\n                  *trans_b,\n                  *m,\n                  *n,\n                  *k,\n                  *alpha,\n                  a,\n                  *ia,\n                  *ja,\n                  desca,\n                  b,\n                  *ib,\n                  *jb,\n                  descb,\n                  *beta,\n                  c,\n                  *ic,\n                  *jc,\n                  descc);\n}\n\nvoid cosma_psgemm_(const char* trans_a,\n             const char* trans_b,\n             const int* m,\n             const int* n,\n             const int* k,\n             const float* alpha,\n             const float* a,\n             const int* ia,\n             const int* ja,\n             const int* desca,\n             const float* b,\n             const int* ib,\n             const int* jb,\n             const int* descb,\n             const float* beta,\n             float* c,\n             const int* ic,\n             const int* jc,\n             const int* descc) {\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        psgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n    cosma::pxgemm<float>(*trans_a,\n                 *trans_b,\n                 *m,\n                 *n,\n                 *k,\n                 *alpha,\n                 a,\n                 *ia,\n                 *ja,\n                 desca,\n                 b,\n                 *ib,\n                 *jb,\n                 
descb,\n                 *beta,\n                 c,\n                 *ic,\n                 *jc,\n                 descc);\n}\n\nvoid cosma_pcgemm_(const char* trans_a,\n            const char* trans_b,\n            const int* m,\n            const int* n,\n            const int* k,\n            const float * alpha,\n            const float * a,\n            const int* ia,\n            const int* ja,\n            const int *desca,\n            const float * b,\n            const int* ib,\n            const int* jb,\n            const int *descb,\n            const float * beta,\n            float  *c,\n            const int* ic,\n            const int* jc,\n            const int *descc) {\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        pcgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n\n    cosma::pxgemm<std::complex<float>>(*trans_a,\n                    *trans_b,\n                    *m,\n                    *n,\n                    *k,\n                    reinterpret_cast<const std::complex<float>&>(*alpha),\n                    reinterpret_cast<const std::complex<float>*>(a),\n                    *ia,\n                    *ja,\n                    desca,\n                    reinterpret_cast<const std::complex<float>*>(b),\n                    *ib,\n                    *jb,\n                    descb,\n                    reinterpret_cast<const std::complex<float>&>(*beta),\n                    reinterpret_cast<std::complex<float>*>(c),\n                    *ic,\n                    *jc,\n                    descc);\n}\n\nvoid cosma_pzgemm_(const char* trans_a,\n            const char* trans_b,\n            const int* m,\n            const int* n,\n            const int* k,\n            const double * alpha,\n            const double * a,\n            const int* ia,\n            const int* ja,\n            const int *desca,\n            const double  *b,\n            
const int* ib,\n            const int* jb,\n            const int* descb,\n            const double * beta,\n            double * c,\n            const int* ic,\n            const int* jc,\n            const int *descc) {\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        pzgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n\n    cosma::pxgemm<std::complex<double>>(*trans_a,\n                     *trans_b,\n                     *m,\n                     *n,\n                     *k,\n                     reinterpret_cast<const std::complex<double>&>(*alpha),\n                     reinterpret_cast<const std::complex<double>*>(a),\n                     *ia,\n                     *ja,\n                     desca,\n                     reinterpret_cast<const std::complex<double>*>(b),\n                     *ib,\n                     *jb,\n                     descb,\n                     reinterpret_cast<const std::complex<double>&>(*beta),\n                     reinterpret_cast<std::complex<double>*>(c),\n                     *ic,\n                     *jc,\n                     descc);\n}\n\n// *********************************************************************************\n// Same as previously, but with added underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid cosma_psgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    cosma_psgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, 
jc, descc);\n}\n\nvoid cosma_pdgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    cosma_pdgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid cosma_pcgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pcgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid cosma_pzgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pzgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\n// *********************************************************************************\n// Same as previously, but with double underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid COSMA_PSGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const 
int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    cosma_psgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PDGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    cosma_pdgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PCGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pcgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PZGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pzgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\n// 
*********************************************************************************\n// Same as previously, but CAPITALIZED.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid COSMA_PSGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    cosma_psgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PDGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    cosma_pdgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PCGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pcgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid COSMA_PZGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const 
int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc) {\n    cosma_pzgemm_(trans_a, transb, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n}\n"
  },
  {
    "path": "src/cosma/prefixed_pxgemm.h",
    "content": "#pragma once\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n// ScaLAPACK API with COSMA prefix\nvoid cosma_psgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pdgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pcgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pzgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but with added underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid cosma_psgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n      
  const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pdgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pcgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid cosma_pzgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but with double underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\nvoid COSMA_PSGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, 
const int* jc, const int* descc);\n\nvoid COSMA_PDGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid COSMA_PCGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid COSMA_PZGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but CAPITALIZED.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid COSMA_PSGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid COSMA_PDGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n   
     const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid COSMA_PCGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid COSMA_PZGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/cosma/profiler.hpp",
    "content": "#pragma once\n\n// The header makes semiprof an optional dependency that needs not be shipped when COSMA is installed.\n//\n#ifdef COSMA_WITH_PROFILING\n\n#include <semiprof.hpp>\n\n// prints the profiler summary\n#define PP() std::cout << semiprof::profiler_summary() << \"\\n\"\n// clears the profiler (counts and timings)\n#define PC() semiprof::profiler_clear()\n\n#else\n#define PE(name)\n#define PL()\n#define PP()\n#define PC()\n#endif\n"
  },
  {
    "path": "src/cosma/pxgemm.cpp",
    "content": "#include <cosma/cosma_pxgemm.hpp>\n\nextern \"C\" {\n#include <cosma/pxgemm.h>\n#include <cosma/interpose.h>\n\n// Reimplement ScaLAPACK signatures functions\nINTERPOSE_C_VOID(psgemm_,\n        (const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc),\n        (trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc)\n        ) {\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        Real__psgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n    cosma::pxgemm<float>(*trans_a,\n                 *trans_b,\n                 *m,\n                 *n,\n                 *k,\n                 *alpha,\n                 a,\n                 *ia,\n                 *ja,\n                 desca,\n                 b,\n                 *ib,\n                 *jb,\n                 descb,\n                 *beta,\n                 c,\n                 *ic,\n                 *jc,\n                 descc);\n}\n\nINTERPOSE_C_VOID(pdgemm_, \n        (const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc),\n        (trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc)\n        ){\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        Real__pdgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        
return;\n    }\n    cosma::pxgemm<double>(*trans_a,\n                  *trans_b,\n                  *m,\n                  *n,\n                  *k,\n                  *alpha,\n                  a,\n                  *ia,\n                  *ja,\n                  desca,\n                  b,\n                  *ib,\n                  *jb,\n                  descb,\n                  *beta,\n                  c,\n                  *ic,\n                  *jc,\n                  descc);\n}\n\nINTERPOSE_C_VOID(pcgemm_, \n        (const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc),\n        (trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc)\n        ){\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        Real__pcgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n    cosma::pxgemm<std::complex<float>>(*trans_a,\n                    *trans_b,\n                    *m,\n                    *n,\n                    *k,\n                    reinterpret_cast<const std::complex<float>&>(*alpha),\n                    reinterpret_cast<const std::complex<float>*>(a),\n                    *ia,\n                    *ja,\n                    desca,\n                    reinterpret_cast<const std::complex<float>*>(b),\n                    *ib,\n                    *jb,\n                    descb,\n                    reinterpret_cast<const std::complex<float>&>(*beta),\n                    reinterpret_cast<std::complex<float>*>(c),\n                    *ic,\n                    *jc,\n                    descc);\n}\n\nINTERPOSE_C_VOID(pzgemm_, \n        (const 
char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc),\n        (trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc)\n        ){\n    if (cosma::is_problem_too_small(*m, *n, *k)) {\n        Real__pzgemm_(trans_a, trans_b, m, n, k, alpha, a, ia, ja, desca, b, ib, jb, descb, beta, c, ic, jc, descc);\n        return;\n    }\n    cosma::pxgemm<std::complex<double>>(*trans_a,\n                     *trans_b,\n                     *m,\n                     *n,\n                     *k,\n                     reinterpret_cast<const std::complex<double>&>(*alpha),\n                     reinterpret_cast<const std::complex<double>*>(a),\n                     *ia,\n                     *ja,\n                     desca,\n                     reinterpret_cast<const std::complex<double>*>(b),\n                     *ib,\n                     *jb,\n                     descb,\n                     reinterpret_cast<const std::complex<double>&>(*beta),\n                     reinterpret_cast<std::complex<double>*>(c),\n                     *ic,\n                     *jc,\n                     descc);\n}\n\n// *********************************************************************************\n// Same as previously, but with added underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\nvoid psgemm(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, 
const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    psgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid pdgemm(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    pdgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid pcgemm(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    pcgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid pzgemm(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc) {\n    pzgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\n// *********************************************************************************\n// Same as previously, but with double underscore at the end.\n// This is used for fortran interfaces, in case fortran 
expects these symbols\n// *********************************************************************************\n\nvoid PSGEMM_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    psgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PDGEMM_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    pdgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PCGEMM_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    pcgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PZGEMM_(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* 
descc) {\n    pzgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\n// *********************************************************************************\n// Same as previously, but CAPITALIZED.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid PSGEMM(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc) {\n    psgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PDGEMM(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc) {\n    pdgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PCGEMM(const char* trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc) {\n    pcgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n\nvoid PZGEMM(const char* 
trans_a, const char* trans_b, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc) {\n    pzgemm_(trans_a, trans_b, m, n, k,\n           alpha, a, ia, ja, desca,\n           b, ib, jb, descb,\n           beta, c, ic, jc, descc);\n}\n}\n"
  },
  {
    "path": "src/cosma/pxgemm.h",
    "content": "#pragma once\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n// ScaLAPACK API (override)\nvoid psgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid pdgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid pcgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid pzgemm(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but with added underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid psgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* 
a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid pdgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid pcgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid pzgemm_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but with double underscore at the end.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid PSGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid PDGEMM_(const 
char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid PCGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid PZGEMM_(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n// *********************************************************************************\n// Same as previously, but CAPITALIZED.\n// This is used for fortran interfaces, in case fortran expects these symbols\n// *********************************************************************************\n\nvoid PSGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float* alpha, const float* a, const int* ia, const int* ja, const int* desca,\n        const float* b, const int* ib, const int* jb, const int* descb, const float* beta,\n        float* c, const int* ic, const int* jc, const int* descc);\n\nvoid PDGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double* alpha, const double* a, const int* ia, const int* ja, const int* desca,\n        const double* b, const int* ib, const int* jb, const int* descb, const double* 
beta,\n        double* c, const int* ic, const int* jc, const int* descc);\n\nvoid PCGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const float * alpha, const float * a, const int* ia,\n        const int* ja, const int* desca, const float * b, const int* ib,\n        const int* jb, const int* descb, const float * beta,\n        float * c, const int* ic, const int* jc, const int* descc);\n\nvoid PZGEMM(const char* trans_a, const char* transb, const int* m, const int* n, const int* k,\n        const double * alpha, const double * a, const int* ia,\n        const int* ja, const int* desca, const double * b, const int* ib,\n        const int* jb, const int* descb, const double * beta,\n        double * c, const int* ic, const int* jc, const int* descc);\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/cosma/pxgemm_params.hpp",
    "content": "// a container class, containing all the parameters of pxgemm\n#pragma once\n#include <algorithm>\n#include <cmath>\n#include <vector>\n#include <stdexcept>\n#include <cosma/scalapack.hpp>\n\nnamespace cosma {\ntemplate <typename T>\nstruct pxgemm_params {\n    // ****************************************\n    // *       INPUT PARAMETERS BEGIN         *\n    // ****************************************\n    // *  global dimensions  *\n    // ***********************\n    // matrix A\n    int ma; // rows\n    int na; // cols\n\n    // matrix B\n    int mb; // rows\n    int nb; // cols\n\n    // matrix C\n    int mc; // rows\n    int nc; // cols\n\n    // ***********************\n    // *     block sizes     *\n    // ***********************\n    // matrix A\n    int bma; // rows\n    int bna; // cols\n\n    // matrix B\n    int bmb; // rows\n    int bnb; // cols\n\n    // matrix C\n    int bmc; // rows\n    int bnc; // cols\n\n    // ***********************\n    // *   submatrices ij    *\n    // ***********************\n    // matrix A\n    int ia = 1; // rows\n    int ja = 1; // cols\n\n    // matrix B\n    int ib = 1; // rows\n    int jb = 1; // cols\n\n    // matrix C\n    int ic = 1; // rows\n    int jc = 1; // cols\n\n    // ***********************\n    // *    problem size     *\n    // ***********************\n    int m;\n    int n;\n    int k;\n\n    // ***********************\n    // *   transpose flags   *\n    // ***********************\n    char trans_a = 'N';\n    char trans_b = 'N';\n\n    // ***********************\n    // *    scaling flags    *\n    // ***********************\n    T alpha = T{1};\n    T beta = T{0};\n\n    // ***********************\n    // *    leading dims     *\n    // ***********************\n    int lld_a;\n    int lld_b;\n    int lld_c;\n\n    // ***********************\n    // *      proc grid      *\n    // ***********************\n    int p_rows; // rows\n    int p_cols; // cols\n    int P;\n    char order = 
'R';\n\n    // ***********************\n    // *      proc srcs      *\n    // ***********************\n    // matrix A\n    int src_ma = 0; // rows\n    int src_na = 0; // cols\n\n    // matrix B\n    int src_mb = 0; // rows\n    int src_nb = 0; // cols\n\n    // matrix C\n    int src_mc = 0; // rows\n    int src_nc = 0; // cols\n\n    // ****************************************\n    // *         INPUT PARAMETERS END         *\n    // ****************************************\n    pxgemm_params() = default;\n\n    void initialize(int mm, int nn, int kk,\n                    int block_a1, int block_a2,\n                    int block_b1, int block_b2,\n                    int block_c1, int block_c2,\n                    int prows, int pcols,\n                    char transa, char transb,\n                    T a, T b) {\n        m = mm;\n        n = nn;\n        k = kk;\n\n        // global problem size\n        // m, n, k are just sizes that we want to multiply\n        // starting from (ia-1, ja-1), (ib-1, jb-1) and (ic-1, jc-1)\n        // this makes the global problem size m+ia-1, n+jb-1, k+ja-1\n        // use transa instead of trans_a since trans_a is set afterwards\n        ma = transpose_if(transa, k, m);\n        na = transpose_if(transa, m, k);\n\n        mb = transpose_if(transb, n, k);\n        nb = transpose_if(transb, k, n);\n        mc = m;\n        nc = n;\n\n        // block sizes\n        bma = block_a1;\n        bna = block_a2;\n\n        bmb = block_b1;\n        bnb = block_b2;\n\n        bmc = block_c1;\n        bnc = block_c2;\n\n        // submatrices ij\n        ia = 1; ja = 1;\n        ib = 1; jb = 1;\n        ic = 1; jc = 1;\n\n        // transpose flags\n        trans_a = std::toupper(transa);\n        trans_b = std::toupper(transb);\n\n        // scaling parameters\n        alpha = a;\n        beta = b;\n\n        // proc grid\n        order = 'R';\n        p_rows = prows;\n        p_cols = pcols;\n        P = p_rows * p_cols;\n\n        
// leading dims\n        lld_a = scalapack::max_leading_dimension(ma, bma, p_rows);\n        lld_b = scalapack::max_leading_dimension(mb, bmb, p_rows);\n        lld_c = scalapack::max_leading_dimension(mc, bmc, p_rows);\n\n        // proc srcs\n        src_ma = 0; src_na = 0;\n        src_mb = 0; src_nb = 0;\n        src_mc = 0; src_nc = 0;\n    }\n\n    pxgemm_params(int m, int n, int k,\n                  int bm, int bn, int bk,\n                  int prows, int pcols,\n                  char transa, char transb,\n                  T a, T b) {\n        // block sizes\n        // blocks BEFORE transposing (if transposed)\n        bma = transpose_if(transa, bk, bm);\n        bna = transpose_if(transa, bm, bk);\n\n        bmb = transpose_if(transb, bn, bk);\n        bnb = transpose_if(transb, bk, bn);\n\n        bmc = bm;\n        bnc = bn;\n\n        initialize(m, n, k,\n                   bma, bna,\n                   bmb, bnb,\n                   bmc, bnc,\n                   prows, pcols,\n                   transa, transb,\n                   a, b);\n        std::string info;\n        if (!valid(info)) {\n            std::runtime_error(\"WRONG PXGEMM PARAMETER: \" + info);\n        }\n    }\n\n\n    pxgemm_params(int m, int n, int k,\n                  int block_a1, int block_a2,\n                  int block_b1, int block_b2,\n                  int block_c1, int block_c2,\n                  int prows, int pcols,\n                  char transa, char transb,\n                  T a, T b) {\n        initialize(m, n, k,\n                   block_a1, block_a2,\n                   block_b1, block_b2,\n                   block_c1, block_c2,\n                   prows, pcols,\n                   transa, transb,\n                   a, b);\n        std::string info;\n        if (!valid(info)) {\n            std::runtime_error(\"WRONG PXGEMM PARAMETER: \" + info);\n        }\n    }\n\n    pxgemm_params(\n        // global sizes\n        int ma, int na, // matrix A\n        
int mb, int nb, // matrix B\n        int mc, int nc, // matrix C\n\n        // block sizes\n        int bma, int bna, // matrix A\n        int bmb, int bnb, // matrix B\n        int bmc, int bnc, // matrix C\n\n        // submatrices ij\n        int ia, int ja, // matrix A\n        int ib, int jb, // matrix B\n        int ic, int jc, // matrix C\n\n        // problem size\n        int m, int n, int k,\n\n        // transpose flags\n        char trans_a, char trans_b,\n\n        // scaling flags\n        T alpha, T beta,\n\n        // leading dimensions\n        int lld_a, int lld_b, int lld_c,\n\n        // processor grid\n        int p_rows, int p_cols,\n        char order,\n\n        // processor srcs\n        int src_ma, int src_na, // matrix A\n        int src_mb, int src_nb, // matrix B\n        int src_mc, int src_nc // matrix C\n    ) :\n        ma(ma), na(na),\n        mb(mb), nb(nb),\n        mc(mc), nc(nc),\n\n        bma(bma), bna(bna),\n        bmb(bmb), bnb(bnb),\n        bmc(bmc), bnc(bnc),\n\n        ia(ia), ja(ja),\n        ib(ib), jb(jb),\n        ic(ic), jc(jc),\n\n        m(m), n(n), k(k),\n\n        trans_a(std::toupper(trans_a)),\n        trans_b(std::toupper(trans_b)),\n\n        alpha(alpha), beta(beta),\n\n        lld_a(lld_a), lld_b(lld_b), lld_c(lld_c),\n\n        order(std::toupper(order)),\n        p_rows(p_rows), p_cols(p_cols),\n        P(p_rows * p_cols),\n\n        src_ma(src_ma), src_na(src_na),\n        src_mb(src_mb), src_nb(src_nb),\n        src_mc(src_mc), src_nc(src_nc)\n    {\n        std::string info;\n        if (!valid(info)) {\n            std::runtime_error(\"WRONG PXGEMM PARAMETER: \" + info);\n        }\n    }\n\n    int transpose_if(char transpose_flag, int row, int col) {\n        bool transposed = transpose_flag != 'N';\n        int result = transposed ? 
row : col;\n        return result;\n    }\n\n    // if parameters are correct:\n    //     returns true is returned and info = \"\";\n    // else:\n    //     returns false and info = name of the incorrectly set variable;\n    bool valid(std::string& info) {\n        info = \"\";\n        // *************************************************\n        // check if transpose flags have proper values\n        // *************************************************\n        std::vector<char> t_flags = {'N', 'T', 'C'};\n        if (std::find(t_flags.begin(), t_flags.end(), trans_a) == t_flags.end()) {\n            info = \"trans_a = \" + std::to_string(trans_a);\n            return false;\n        }\n        if (std::find(t_flags.begin(), t_flags.end(), trans_b) == t_flags.end()) {\n            info = \"trans_b = \" + std::to_string(trans_b);\n            return false;\n        }\n        if (order != 'R' && order != 'C') {\n            info = \"oder = \" + std::to_string(order);\n            return false;\n        }\n\n        // *************************************************\n        // check if the following values are all positive\n        // *************************************************\n        std::vector<int> positive = {\n             ma, na, mb, nb, mc, nc,\n             bma, bna, bmb, bnb, bmc, bnc,\n             m, n, k,\n             lld_a, lld_b, lld_c,\n             p_rows, p_cols, P,\n        };\n        std::vector<std::string> positive_labels = {\n             \"ma\", \"na\", \"mb\", \"nb\", \"mc\", \"nc\",\n             \"bma\", \"bna\", \"bmb\", \"bnb\", \"bmc\", \"bnc\",\n             \"m\", \"n\", \"k\",\n             \"lld_a\", \"lld_b\", \"lld_c\",\n             \"p_rows\", \"p_cols\", \"P\"\n        };\n        for (std::size_t i = 0; i < positive.size(); ++i) {\n            if (positive[i] < 0) {\n                info = positive_labels[i] + \" = \" + std::to_string(positive[i]);\n                return false;\n            }\n        }\n\n     
   // *************************************************\n        // check if submatrix start index  \n        // is inside the global matrix\n        // *************************************************\n        // matrix A\n        if (ia < 1 || ia > ma) {\n            info = \"ia = \" + std::to_string(ia);\n            return false;\n        }\n        if (ja < 1 || ja > na) {\n            info = \"ja = \" + std::to_string(ja);\n            return false;\n        }\n\n        // matrix B\n        if (ib < 1 || ib > mb) {\n            info = \"ib = \" + std::to_string(ib);\n            return false;\n        }\n        if (jb < 1 || jb > nb) {\n            info = \"jb = \" + std::to_string(jb);\n            return false;\n        }\n\n        // matrix C\n        if (ic < 1 || ic > mc) {\n            info = \"ic = \" + std::to_string(ic);\n            return false;\n        }\n        if (jc < 1 || jc > nc) {\n            info = \"jc = \" + std::to_string(jc);\n            return false;\n        }\n\n        // *************************************************\n        // check if submatrix end index\n        // is inside the global matrix\n        // *************************************************\n        // matrix A\n        int ma_sub = transpose_if(trans_a, k, m);\n        // guaranteed to be >= ia \n        // (since we previously checked ma_sub >= 1 and ia >= 1)\n        int ma_sub_end = ia - 1 + ma_sub;\n        if (ma_sub_end >= ma) {\n            info = \"ia - 1 + (m or k) = \" + std::to_string(ma_sub_end);\n            return false;\n        }\n        int na_sub = transpose_if(trans_a, m, k);\n        // guaranteed to be >= ja \n        // (since we previously checked na_sub >= 1 and ja >= 1)\n        int na_sub_end = ja - 1 + na_sub;\n        if (na_sub_end >= na) {\n            info = \"ja - 1 + (k or m) = \" + std::to_string(na_sub_end);\n            return false;\n        }\n\n        // matrix B\n        int mb_sub = transpose_if(trans_b, n, 
k);\n        // guaranteed to be >= ib \n        // (since we previously checked mb_sub >= 1 and ib >= 1)\n        int mb_sub_end = ib - 1 + mb_sub;\n        if (mb_sub_end >= mb) {\n            info = \"ib - 1 + (k or n) = \" + std::to_string(mb_sub_end);\n            return false;\n        }\n        int nb_sub = transpose_if(trans_b, k, n);\n        // guaranteed to be >= jb \n        // (since we previously checked nb_sub >= 1 and jb >= 1)\n        int nb_sub_end = jb - 1 + nb_sub;\n        if (nb_sub_end >= nb) {\n            info = \"jb - 1 + (n or k) = \" + std::to_string(nb_sub_end);\n            return false;\n        }\n\n        // matrix C\n        int mc_sub = m;\n        // guaranteed to be >= ic \n        // (since we previously checked mc_sub >= 1 and ic >= 1)\n        int mc_sub_end = ic - 1 + mc_sub;\n        if (mc_sub_end >= mc) {\n            info = \"ic - 1 + m = \" + std::to_string(mc_sub_end);\n            return false;\n        }\n        int nc_sub = n;\n        // guaranteed to be >= jc \n        // (since we previously checked nc_sub >= 1 and jc >= 1)\n        int nc_sub_end = jc - 1 + nc_sub;\n        if (nc_sub_end >= nc) {\n            info = \"jc - 1 + n = \" + std::to_string(nc_sub_end);\n            return false;\n        }\n\n        // *************************************************\n        // check if row/col src elements are within the \n        // global dimensions of matrices\n        // *************************************************\n        // matrix A\n        if (src_ma < 0 || src_ma >= ma) {\n            info = \"src_ma = \" + std::to_string(src_ma);\n            return false;\n        }\n        if (src_na < 0 || src_na >= na) {\n            info = \"src_na = \" + std::to_string(src_na);\n            return false;\n        }\n\n        // matrix B\n        if (src_mb < 0 || src_mb >= mb) {\n            info = \"src_mb = \" + std::to_string(src_mb);\n            return false;\n        }\n        if (src_nb < 0 || 
src_nb >= nb) {\n            info = \"src_nb = \" + std::to_string(src_nb);\n            return false;\n        }\n\n        // matrix C\n        if (src_mc < 0 || src_mc >= mc) {\n            info = \"src_mc = \" + std::to_string(src_mc);\n            return false;\n        }\n        if (src_nc < 0 || src_nc >= nc) {\n            info = \"src_nc = \" + std::to_string(src_nc);\n            return false;\n        }\n\n        // *************************************************\n        // check leading dimensions\n        // *************************************************\n        int min_lld_a = scalapack::min_leading_dimension(ma, bma, p_rows);\n        int min_lld_b = scalapack::min_leading_dimension(mb, bmb, p_rows);\n        int min_lld_c = scalapack::min_leading_dimension(mc, bmc, p_rows);\n\n        if (lld_a < min_lld_a) {\n            info = \"lld_a = \" + std::to_string(min_lld_a);\n            return false;\n        }\n        if (lld_b < min_lld_b) {\n            info = \"lld_b = \" + std::to_string(min_lld_b);\n            return false;\n        }\n        if (lld_c < min_lld_c) {\n            info = \"lld_c = \" + std::to_string(min_lld_c);\n            return false;\n        }\n\n        return true;\n    }\n\n    friend std::ostream &operator<<(std::ostream &os,\n                                    const pxgemm_params &obj) {\n        os << \"=============================\" << std::endl;\n        os << \"      GLOBAL MAT. 
SIZES\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"A = \" << obj.ma << \" x \" << obj.na << std::endl;\n        os << \"B = \" << obj.mb << \" x \" << obj.nb << std::endl;\n        os << \"C = \" << obj.mc << \" x \" << obj.nc << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"        SUBMATRICES\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"(ia, ja) = (\" << obj.ia << \", \" << obj.ja << \")\" << std::endl;\n        os << \"(ib, jb) = (\" << obj.ib << \", \" << obj.jb << \")\" << std::endl;\n        os << \"(ic, jc) = (\" << obj.ic << \", \" << obj.jc << \")\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"      SUBMATRIX SIZES\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"m = \" << obj.m << std::endl;\n        os << \"n = \" << obj.n << std::endl;\n        os << \"k = \" << obj.k << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"      ADDITIONAL OPTIONS\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"alpha = \" << obj.alpha << std::endl;\n        os << \"beta = \" << obj.beta << std::endl;\n        os << \"trans_a = \" << obj.trans_a << std::endl;\n        os << \"trans_b = \" << obj.trans_b << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"         PROC GRID\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"grid = \" << obj.p_rows << \" x \" << obj.p_cols << std::endl;\n        os << \"grid order = \" << obj.order << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"         PROC SRCS\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"P_SRC(A) = (\" << obj.src_ma 
<< \", \" << obj.src_na << \")\" << std::endl;\n        os << \"P_SRC(B) = (\" << obj.src_mb << \", \" << obj.src_nb << \")\" << std::endl;\n        os << \"P_SRC(C) = (\" << obj.src_mc << \", \" << obj.src_nc << \")\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"          BLOCK SIZES\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"Blocks(A) = (\" << obj.bma << \", \" << obj.bna << \")\" << std::endl;\n        os << \"Blocks(B) = (\" << obj.bmb << \", \" << obj.bnb << \")\" << std::endl;\n        os << \"Blocks(C) = (\" << obj.bmc << \", \" << obj.bnc << \")\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"          LEADING DIMS\" << std::endl;\n        os << \"=============================\" << std::endl;\n        os << \"lld_a = \" << obj.lld_a << std::endl;\n        os << \"lld_b = \" << obj.lld_b << std::endl;\n        os << \"lld_c = \" << obj.lld_c << std::endl;\n        os << \"=============================\" << std::endl;\n        return os;\n    }\n};\n}\n"
  },
  {
    "path": "src/cosma/random_generator.hpp",
    "content": "#pragma once\n\n#include <complex>\n#include <random>\n\nnamespace cosma {\n/*\n * generators a random int, float, double, \n * complex<int>, complex<float>, complex<double>\n */\ntemplate <typename Scalar>\nstruct random_generator {\n  static inline Scalar sample();\n};\n\ntemplate <>\ninline int random_generator<int>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_int_distribution<int> dist(0, 10); // distribution\n\n    return dist(rng);\n}\n\ntemplate <>\ninline double random_generator<double>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_real_distribution<double> dist(0.0, 1.0); // distribution\n\n    return dist(rng);\n}\n\ntemplate <>\ninline float random_generator<float>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_real_distribution<float> dist(0.0f, 1.0f); // distribution\n\n    return dist(rng);\n}\n\ntemplate <>\ninline std::complex<int> random_generator<std::complex<int>>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_int_distribution<int> dist(0, 10); // distribution\n    return {dist(rng), dist(rng)};\n}\n\ntemplate <>\ninline std::complex<float> random_generator<std::complex<float>>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_real_distribution<float> dist(0.0f, 1.0f); // distribution\n    return {dist(rng), dist(rng)};\n}\n\ntemplate <>\ninline std::complex<double> 
random_generator<std::complex<double>>::sample() {\n    static std::random_device dev;                        // seed\n    static std::mt19937 rng(dev());                       // generator\n    static std::uniform_real_distribution<double> dist(0.0, 1.0); // distribution\n    return {dist(rng), dist(rng)};\n}\n} // end namespace cosma\n"
  },
  {
    "path": "src/cosma/scalapack.cpp",
    "content": "#include <cosma/scalapack.hpp>\n\ncosta::scalapack::ordering cosma::scalapack::rank_ordering(int ctxt, int P) {\n    // check whether rank grid is row-major or col-major\n    auto ordering = costa::scalapack::ordering::column_major;\n    if (P > 1) {\n        int prow, pcol;\n        // check the coordinates of rank 1 to see\n        // if the rank grid is row-major or col-major\n        blacs::Cblacs_pcoord(ctxt, 1, &prow, &pcol);\n        if (prow == 0 && pcol == 1) {\n            ordering = costa::scalapack::ordering::row_major;\n        }\n    }\n    return ordering;\n}\n\nint cosma::scalapack::get_grid_context(const int* desca, const int* descb, const int* descc) {\n    int ctxt = desca[1];\n    // all matrices should belong to the same context\n    assert(desca[1] == descb[1]);\n    assert(descb[1] == descc[1]);\n    return ctxt;\n}\n\nint cosma::scalapack::get_grid_context(const int* desc) {\n    return desc[1];\n}\n\nint cosma::scalapack::leading_dimension(const int* desc) {\n    return desc[8];\n}\n\n// queries the grid blacs context to get the communication blacs context\nint cosma::scalapack::get_comm_context(const int grid_context) {\n    int comm_context;\n    blacs::Cblacs_get(grid_context, 10, &comm_context);\n    return comm_context;\n}\n\n// gets MPI_Comm from the grid blacs context\nMPI_Comm cosma::scalapack::get_communicator(const int grid_context) {\n    int comm_context = get_comm_context(grid_context);\n    MPI_Comm comm = blacs::Cblacs2sys_handle(comm_context);\n    return comm;\n}\n\n// computes the number of rows or columns that the specified rank owns\nint cosma::scalapack::numroc(int n, int nb, int proc_coord, int proc_src, int n_procs) {\n    // Arguments:\n    /*\n      - n: global matrix dimension (rows or columns)\n      - nb: corresponding dimension of a block\n      - proc_coord: coordinate of the process for which we are querying\n      - proc_src: process src\n      - n_procs: total number of processes along this 
dimension\n     */\n    // number of whole blocks along this dimension\n    int n_blocks = n / nb;\n\n    // the offset of given process to the source process\n    // make sure it stays positive\n    int proc_offset = (n_procs + proc_coord - proc_src) % n_procs;\n\n    // number of blocks per process (at least)\n    // Can also be zero.\n    int n_blocks_per_process = n_blocks/n_procs;\n    // Number of rows or columns that each process has (at least).\n    // Can also be zero.\n    int n_rows_or_cols_per_process = n_blocks_per_process * nb;\n\n    // each rank owns at least this base\n    int n_rows_or_columns_total = n_rows_or_cols_per_process;\n\n    // if there is a remainder, then the current \n    // process might own some additional blocks\n    int remainder = n_blocks % n_procs;\n\n    // possible additional \"whole\" blocks that\n    // the current rank owns\n    n_rows_or_columns_total += proc_offset < remainder ? nb : 0;\n    // possible additional \"partial\" blocks that \n    // the current ranks owns\n    n_rows_or_columns_total += proc_offset == remainder ? 
n % nb : 0;\n\n    return n_rows_or_columns_total;\n}\n\n// minimum lld: used mostly for correctness checking of pxgemm parameters\nint cosma::scalapack::min_leading_dimension(int n, int nb, int rank_grid_dim) {\n    // Arguments:\n    /*\n      - n: global matrix dimension (rows or columns)\n      - nb: corresponding dimension of a block\n      - rank_grid_dim: total number of processes along this dimension\n     */\n    // number of blocks along this dimension\n    int n_blocks = n / nb;\n\n    // number of blocks per process (at least)\n    // Can also be zero.\n    int n_blocks_per_process = n_blocks/rank_grid_dim;\n    // Number of rows or columns that each process has (at least).\n    // Can also be zero.\n    // each rank owns at least this many rows\n    int min_n_rows_or_cols_per_process = n_blocks_per_process * nb;\n\n    return min_n_rows_or_cols_per_process;\n}\n\n// maximum lld: used mostly for correctness checking of pxgemm parameters\nint cosma::scalapack::max_leading_dimension(int n, int nb, int rank_grid_dim) {\n    // Arguments:\n    /*\n      - n: global matrix dimension (rows or columns)\n      - nb: corresponding dimension of a block\n      - rank_grid_dim: total number of processes along this dimension\n     */\n    int lld = min_leading_dimension(n, nb, rank_grid_dim);\n    int n_blocks = n / nb;\n    int remainder = n_blocks % rank_grid_dim;\n    lld += (remainder == 0) ? 
(n % nb) : nb;\n    return lld;\n}\n\nint cosma::scalapack::local_buffer_size(const int* desc) {\n    int lld = leading_dimension(desc);\n\n    int n_cols = desc[3]; // global matrix size (columns)\n    int nb_cols = desc[5]; // block size (columns)\n    int src_proc = desc[7]; // processor src (columns)\n\n    int ctxt = desc[1];\n\n    int nprow, npcol, myrow, mycol;\n    blacs::Cblacs_gridinfo(ctxt, &nprow, &npcol, &myrow, &mycol);\n\n    int P = nprow * npcol;\n\n    int n_local_cols = numroc(n_cols, nb_cols, mycol, src_proc, npcol);\n\n    return lld * n_local_cols;\n}\n"
  },
  {
    "path": "src/cosma/scalapack.hpp",
    "content": "#pragma once\n\n#include <cosma/blacs.hpp>\n\n#include <costa/grid2grid/scalapack_layout.hpp>\n\n#include <cassert>\n\nnamespace cosma {\nnamespace scalapack {\nstruct block_size {\n    int rows = 0;\n    int cols = 0;\n\n    block_size() = default;\n    block_size(int rows, int cols): rows(rows), cols(cols) {}\n    block_size(const int* desc) {\n        rows = desc[4];\n        cols = desc[5];\n    }\n\n\n};\n\nstruct global_matrix_size {\n    int rows = 0;\n    int cols = 0;\n\n    global_matrix_size() = default;\n    global_matrix_size(int rows, int cols): rows(rows), cols(cols) {}\n    global_matrix_size(const int* desc) {\n        rows = desc[2];\n        cols = desc[3];\n    }\n};\n\nstruct rank_src {\n    int row_src = 0;\n    int col_src = 0;\n\n    rank_src() = default;\n    rank_src(int rsrc, int csrc): row_src(rsrc), col_src(csrc) {}\n    rank_src(const int* desc) {\n        row_src = desc[6];\n        col_src = desc[7];\n    }\n};\n\ncosta::scalapack::ordering rank_ordering(int ctxt, int P);\n\n// gets the grid context from descriptors of A, B and C and compares\n// if all three matrices belong to the same context\nint get_grid_context(const int* desca, const int* descb, const int* descc);\n// same as previous, but just for a single matrix\nint get_grid_context(const int* desc);\n// gets the communication blacs context from the grid blacs context\nint get_comm_context(const int grid_context);\n// gets the MPI communicator from the grid blacs context\nMPI_Comm get_communicator(const int grid_context);\n\n// minimum leading dimension (independent of current rank)\n// used mostly for checking the correctness of parameters\nint min_leading_dimension(int n, int nb, int rank_grid_dim);\n// maximum leading dimension (independent of current rank)\n// used mostly for checking the correctness of parameters\nint max_leading_dimension(int n, int nb, int rank_grid_dim);\n\nint leading_dimension(const int* desc);\n\nint numroc(int n, int nb, int 
iproc, int isrcproc, int nprocs);\n\nint local_buffer_size(const int* desc);\n}}\n"
  },
  {
    "path": "src/cosma/statistics.hpp",
    "content": "#pragma once\n\n#include <cosma/matrix.hpp>\n#include <cosma/strategy.hpp>\n\nnamespace cosma {\n/* Simulates the algorithm (without actually computing the matrix\n   multiplication) and outputs the following information:\n       * total volume of the communication\n       * maximum volume of computation done in a single branch\n       * maximum buffer size that the algorithm requires\n       * size of matrix (m, n, k) in the base case with the maximum\n   computational volume\n */\n\nCosmaMatrix<double> *matrixA;\nCosmaMatrix<double> *matrixB;\nCosmaMatrix<double> *matrixC;\n\nlong long total_communication = 0;\nlong long max_buffer_size = 0;\nlong long max_total_computation = 0;\nint local_m = 0;\nint local_n = 0;\nint local_k = 0;\n\nvoid multiply(const Strategy &strategy, int n_rep = 1);\n\nvoid multiply(Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              int step,\n              const Strategy &strategy,\n              double beta,\n              int rank);\n\nvoid local_multiply(int m, int n, int k, double beta);\n\nvoid sequential(Interval &m,\n                Interval &n,\n                Interval &k,\n                Interval &P,\n                int step,\n                const Strategy &strategy,\n                double beta,\n                int rank);\n\nvoid parallel(Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              int step,\n              const Strategy &strategy,\n              double beta,\n              int rank);\n\n// Assumption: we assume that at each step only 1 dimension is split\nvoid multiply(const Strategy &strategy, int n_rep) {\n    Interval mi = Interval(0, strategy.m - 1);\n    Interval ni = Interval(0, strategy.n - 1);\n    Interval ki = Interval(0, strategy.k - 1);\n    Interval Pi = Interval(0, strategy.P - 1);\n\n    bool dry_run = true;\n    // Declare A,B and COSMA matrices objects\n    matrixA = new 
CosmaMatrix<double>('A', strategy, 0, dry_run);\n    matrixB = new CosmaMatrix<double>('B', strategy, 0, dry_run);\n    matrixC = new CosmaMatrix<double>('C', strategy, 0, dry_run);\n\n    // simulate the algorithm for each rank\n    for (int rank = 0; rank < Pi.length(); ++rank) {\n        multiply(mi, ni, ki, Pi, 0, strategy, 0.0, rank);\n    }\n\n    delete matrixA;\n    delete matrixB;\n    delete matrixC;\n\n    std::cout << \"Total communication volume per rank in [GB]: \"\n              << sizeof(double) * total_communication / (strategy.P * 1e9) << std::endl;\n    std::cout << \"Total computation units: \" << max_total_computation\n              << std::endl;\n    std::cout << \"Max buffer size: \" << max_buffer_size << std::endl;\n    std::cout << \"Local m = \" << local_m << std::endl;\n    std::cout << \"Local n = \" << local_n << std::endl;\n    std::cout << \"Local k = \" << local_k << std::endl;\n}\n\n// dispatch to local call, parallel, or sequential as appropriate\nvoid multiply(Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              int step,\n              const Strategy &strategy,\n              double beta,\n              int rank) {\n    // current submatrices that are being computed\n    Interval2D a_range(m, k);\n    Interval2D b_range(k, n);\n    Interval2D c_range(m, n);\n\n    // For each of P processors remember which sequential bucket we are\n    // currently on\n    std::vector<int> bucketA = matrixA->seq_buckets(P);\n    std::vector<int> bucketB = matrixB->seq_buckets(P);\n    std::vector<int> bucketC = matrixC->seq_buckets(P);\n\n    // Skip all buckets that are \"before\" the current submatrices.\n    // the relation submatrix1 <before> submatrix2 is defined in Interval2D.\n    // Intuitively, this will skip all the buckets that are \"above\" or \"on the\n    // left\" of the current submatrices. 
We say \"before\" because whenever we\n    // split sequentially, we always first start with the \"above\" submatrix (if\n    // the splitting is horizontal) or with the left one (if the splitting is\n    // vertical). which explains the name of the relation \"before\".\n    matrixA->update_buckets(P, a_range);\n    matrixB->update_buckets(P, b_range);\n    matrixC->update_buckets(P, c_range);\n\n    if (strategy.final_step(step) || strategy.empty())\n        local_multiply(m.length(), n.length(), k.length(), beta);\n    else {\n        if (strategy.parallel_step(step))\n            parallel(m, n, k, P, step, strategy, beta, rank);\n        else\n            sequential(m, n, k, P, step, strategy, beta, rank);\n    }\n\n    // Revert the buckets pointers to their previous values.\n    matrixA->set_seq_buckets(P, bucketA);\n    matrixB->set_seq_buckets(P, bucketB);\n    matrixC->set_seq_buckets(P, bucketC);\n}\n\nvoid local_multiply(int m, int n, int k, double beta) {\n    long long comp = static_cast<long long>(m) * n * k;\n    if (comp > max_total_computation) {\n        max_total_computation = comp;\n        local_m = m;\n        local_n = n;\n        local_k = k;\n    }\n}\n\n/*\n  In each sequential step, one of the dimensions is split, and each of the\n  subproblems is solved sequentially by all P processors.\n*/\nvoid sequential(Interval &m,\n                Interval &n,\n                Interval &k,\n                Interval &P,\n                int step,\n                const Strategy &strategy,\n                double beta,\n                int rank) {\n    // split the dimension but not the processors,\n    // all P processors are taking part in each substep\n    if (strategy.split_m(step)) {\n        for (int M = 0; M < strategy.divisor(step); ++M) {\n            Interval newm = m.subinterval(strategy.divisor(step), M);\n            multiply(newm, n, k, P, step + 1, strategy, beta, rank);\n        }\n        return;\n    }\n\n    if (strategy.split_n(step)) {\n        for (int 
N = 0; N < strategy.divisor(step); ++N) {\n            Interval newn = n.subinterval(strategy.divisor(step), N);\n            multiply(m, newn, k, P, step + 1, strategy, beta, rank);\n        }\n        return;\n    }\n\n    // if divided by k, then the result of each subproblem is just a partial\n    // result for C which should all be summed up. We solve this by letting beta\n    // parameter be 1 in substeps that follow so that dgemm automatically adds\n    // up the subsequent results to the previous partial results of C.\n    if (strategy.split_k(step)) {\n        for (int K = 0; K < strategy.divisor(step); ++K) {\n            Interval newk = k.subinterval(strategy.divisor(step), K);\n            multiply(m,\n                     n,\n                     newk,\n                     P,\n                     step + 1,\n                     strategy,\n                     (K == 0) && (beta == 0) ? 0 : 1,\n                     rank);\n        }\n        return;\n    }\n}\n\ntemplate <typename T>\nT which_is_expanded(T A, T B, T C, const Strategy &strategy, size_t step) {\n    // divn > 1 => divm==divk==1 => matrix A has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_n(step))\n        return A;\n\n    // divm > 1 => divk==divn==1 => matrix B has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_m(step))\n        return B;\n\n    // divk > 1 => divm==divn==1 => matrix C has not been splitted\n    // therefore it is expanded (in the communication of a parallel step)\n    if (strategy.split_k(step))\n        return C;\n\n    // this should never happen\n    return C;\n}\n\nvoid parallel(Interval &m,\n              Interval &n,\n              Interval &k,\n              Interval &P,\n              int step,\n              const Strategy &strategy,\n              double beta,\n              int rank) {\n    int div = 
strategy.divisor(step);\n    int divm = strategy.divisor_m(step);\n    int divn = strategy.divisor_n(step);\n    int divk = strategy.divisor_k(step);\n\n    // processor subinterval which the current rank belongs to\n    int partition_idx = P.subinterval_index(div, rank);\n    Interval newP = P.subinterval(div, partition_idx);\n    // intervals of M, N and K that the current rank is in charge of,\n    // together with other ranks from its group.\n    // (see the definition of group and offset below)\n    Interval newm = m.subinterval(divm, divm > 1 ? partition_idx : 0);\n    Interval newn = n.subinterval(divn, divn > 1 ? partition_idx : 0);\n    Interval newk = k.subinterval(divk, divk > 1 ? partition_idx : 0);\n\n    int offset = rank - newP.first();\n\n    /*\n     * size_before_expansion:\n         maps rank i from interval P to the vector [bucket1.size(),\n     bucket2.size()...] containing buckets which are inside \"range\" that rank i\n     owns\n\n     * total_before_expansion:\n         maps rank i from interval P to the sum of all buckets inside\n     size_before_expansion[i]\n\n     * size_after_expansion:\n         maps rank i from interval newP to the vector of [bucket1.size(),\n     bucket2.size()...] but each bucket here is expanded, i.e. 
each bucket size\n     in this vector is actually the sum of the sizes of this bucket in all the\n     ranks from the communication ring of the current rank.\n\n     * total_after_expansion:\n         maps rank i from interval P to the sum of all buckets inside\n     size_after_expansion[i]\n    */\n    std::vector<std::vector<int>> size_before_expansion(P.length());\n    std::vector<int> total_before_expansion(P.length());\n    std::vector<std::vector<int>> size_after_expansion(newP.length());\n    std::vector<int> total_after_expansion(newP.length());\n\n    /*\n     * this gives us the 2D interval of the matrix that will be expanded:\n         if divm > 1 => matrix B expanded => Interval2D(k, n)\n         if divn > 1 => matrix A expanded => Interval2D(m, k)\n         if divk > 1 => matrix C expanded => Interval2D(m, n)\n    */\n    Interval row_copy = which_is_expanded(m, k, m, strategy, step);\n    Interval col_copy = which_is_expanded(k, n, n, strategy, step);\n    Interval2D range(row_copy, col_copy);\n\n    /*\n     * this gives us a matrix that is expanded:\n         if divm > 1 => matrix B is expanded\n         if divn > 1 => matrix A is expanded\n         if divk > 1 => matrix C is expanded\n    */\n    CosmaMatrix<double> *expanded_mat =\n        which_is_expanded(matrixA, matrixB, matrixC, strategy, step);\n    // gets the buffer sizes before and after expansion.\n    // this still does not modify the buffer sizes inside layout\n    // it just tells us what they would be.\n    expanded_mat->buffers_before_expansion(\n        P, range, size_before_expansion, total_before_expansion);\n\n    expanded_mat->buffers_after_expansion(P,\n                                          newP,\n                                          size_before_expansion,\n                                          total_before_expansion,\n                                          size_after_expansion,\n                                          total_after_expansion);\n\n    // 
increase the buffer sizes before the substeps\n    expanded_mat->set_sizes(newP, size_after_expansion);\n    // this is the sum of sizes of all the buckets after expansion\n    // that the current rank will own.\n    // which is also the size of the matrix after expansion\n    long long new_size = total_after_expansion[offset];\n    max_buffer_size = std::max(max_buffer_size, new_size);\n    int received_volume = new_size - total_before_expansion[rank - P.first()];\n    // total_communication += received_volume;\n    total_communication += new_size;\n\n    // invoke the substeps call with the new communicator containing ranks from\n    // newP observe that we have only one substeps branch per rank here (we are\n    // not entering a loop of substeps calls as in sequential steps since the\n    // current rank will only enter into one substeps branch since ranks are\n    // split).\n    multiply(newm, newn, newk, newP, step + 1, strategy, beta, rank);\n\n    // after the memory is freed, the buffer sizes are back to the previous\n    // values (the values at the beginning of this parallel step)\n    expanded_mat->set_sizes(\n        newP, size_before_expansion, newP.first() - P.first());\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/strategy.cpp",
    "content": "#include <cosma/strategy.hpp>\n#include <cosma/environment_variables.hpp>\n\nnamespace cosma {\nint Strategy::get_min_dim_size() {\n    static int min_dim_size = get_min_local_dimension();\n    return min_dim_size;\n}\n\nstd::size_t Strategy::n_steps() const {\n    return divisors.size();\n}\n\n// constructors\nStrategy::Strategy() = default;\n// copy constructor\nStrategy::Strategy(Strategy& other) = default;\nStrategy::Strategy(const Strategy& other) = default;\n\n// == operator\nbool Strategy::operator==(const Strategy &other) const {\n    return \n        this->m == other.m\n        &&\n        this->n == other.n\n        &&\n        this->k == other.k\n        &&\n        this->P == other.P\n        &&\n        this->memory_limit == other.memory_limit\n        &&\n        this->divisors == other.divisors\n        &&\n        this->step_type == other.step_type\n        &&\n        this->split_dimension == other.split_dimension\n        &&\n        this->overlap_comm_and_comp == other.overlap_comm_and_comp;\n}\n\nbool Strategy::operator!=(const Strategy &other) const {\n    return !(*this == other);\n}\n\n// if the strategy proposed in divs, dims and types is not complete\n// (i.e. 
does not divide the problem completely), then\n// the strategy will try to use the proposed incomplete strategy\n// as the prefix and will try to complete it here.\nStrategy::Strategy(int mm,\n                   int nn,\n                   int kk,\n                   size_t PP,\n                   std::vector<int> &divs,\n                   std::string &dims,\n                   std::string &types,\n                   long long mem_limit,\n                   bool top,\n                   bool overlap,\n                   bool busy_waiting)\n    : m(mm)\n    , n(nn)\n    , k(kk)\n    , P(PP)\n    , memory_limit(mem_limit)\n    , divisors(divs)\n    , split_dimension(dims)\n    , step_type(types)\n    , topology(top)\n    , overlap_comm_and_comp(overlap)\n    , use_busy_waiting(busy_waiting) {\n\n    // if divisors are non-empty,\n    // then take it as a prefix to this strategy\n    bool incomplete_strategy = false;\n    square_strategy(incomplete_strategy);\n    check_if_valid();\n    check_if_irregular();\n    compute_min_sizes();\n}\n\nStrategy::Strategy(int mm,\n                   int nn,\n                   int kk,\n                   size_t PP,\n                   long long mem_limit,\n                   bool top,\n                   bool overlap,\n                   bool busy_waiting)\n    : m(mm)\n    , n(nn)\n    , k(kk)\n    , P(PP)\n    , memory_limit(mem_limit)\n    , topology(top)\n    , overlap_comm_and_comp(overlap)\n    , use_busy_waiting(busy_waiting) {\n    divisors.clear();\n    step_type = \"\";\n    split_dimension = \"\";\n    bool incomplete_strategy;\n    square_strategy(incomplete_strategy);\n    // compress_steps();\n    // optimize_strategy();\n    check_if_valid();\n    check_if_irregular();\n    compute_min_sizes();\n}\n\nstd::tuple<long long, long long, long long>\nStrategy::initial_memory(long long m, long long n, long long k, int P) {\n  return std::make_tuple(\n            math_utils::divide_and_round_up(m * k, P),\n            
math_utils::divide_and_round_up(k * n, P),\n            math_utils::divide_and_round_up(m * n, P)\n    );\n}\n\nbool Strategy::add_step(long long& prev_m, long long& prev_n, long long& prev_k, \n                        int& prev_P, char step, char dim_label, int divisor) {\n    long long *dim1, *dim2, *dim3;\n    if (dim_label == 'm') {\n        dim1 = &prev_m;\n        dim2 = &prev_n;\n        dim3 = &prev_k;\n    } else if (dim_label == 'n') {\n        dim1 = &prev_n;\n        dim2 = &prev_m;\n        dim3 = &prev_k;\n    } else {\n        dim1 = &prev_k;\n        dim2 = &prev_m;\n        dim3 = &prev_n;\n    }\n    // if dimension becomes too small\n    // try to correct it (by finding the smaller divisor)\n    // or completely ignore this step \n    // if such a divisor cannot be found\n    if (*dim1/divisor < get_min_dim_size()) {\n        // try to find smaller divisor\n        int new_d = *dim1 / get_min_dim_size();\n        // check if this divisor is feasible\n        if (new_d > 1 && *dim1 / new_d >= get_min_dim_size()) {\n            split_dimension += dim_label;\n            step_type += step;\n            divisors.push_back(new_d);\n            *dim1 /= new_d;\n            // decrease the number of processes\n            // by exchanging the divisor d with new_d\n            if (step == 'p') {\n                // change the global P as well, because the global\n                // number of processors has to be decreased as well\n                P = P / divisor * new_d;\n                // let it look like we performed this step\n                prev_P = prev_P / divisor * new_d;\n            }\n            return true;\n        } else {\n            // exclude this divisor\n            // ignore this step\n            if (step == 'p') {\n                // change the global P as well, because the global\n                // number of processors has to be decreased as well\n                P = P / divisor;\n                // let it look like we 
performed this step\n                prev_P = prev_P / divisor;\n            }\n            return false;\n        }\n    } else {\n        split_dimension += dim_label;\n        step_type += step;\n        divisors.push_back(divisor);\n        *dim1 /= divisor;\n        // decrease the number of processes\n        // by exchanging the divisor d with new_d\n        if (step == 'p') {\n            // do not change the global number of processors in this case\n            prev_P = prev_P / divisor;\n        }\n        return true;\n    }\n}\n\n\nbool Strategy::divide(std::vector<int> &div_factors,\n                      int &dim_i,\n                      long long &m,\n                      long long &n,\n                      long long &k,\n                      int &P,\n                      const char label) {\n    long long dim1, dim2, dim3;\n    if (label == 'm') {\n        dim1 = m;\n        dim2 = n;\n        dim3 = k;\n    } else if (label == 'n') {\n        dim1 = n;\n        dim2 = m;\n        dim3 = k;\n    } else {\n        dim1 = k;\n        dim2 = m;\n        dim3 = n;\n    }\n\n    int next_div = 1;\n    int accumulated_div = 1;\n    bool did_parallel = false;\n\n    if (dim_i < div_factors.size()) {\n        next_div = div_factors[dim_i];\n    }\n\n    bool largest = dim1 >= std::max(dim2, dim3);\n    bool first_run = true;\n\n    // std::cout << \"m-split divide and round = \" <<\n    // math_utils::divide_and_round_up(k * n * next_div, P) << std::endl;\n    // std::cout << \"m / acc_div = \" << m/accumulated_div << std::endl;\n    // if m largest => split it\n    while (dim_i < div_factors.size() && (largest || first_run)) {\n        accumulated_div = next_div;\n        did_parallel = true;\n        dim_i++;\n        // i++;\n        if (dim_i >= div_factors.size())\n            break;\n        next_div *= div_factors[dim_i];\n\n        first_run = false;\n        largest = dim1 / accumulated_div >= std::max(dim2, dim3);\n    }\n\n    if 
(did_parallel) {\n        // i--;\n        return add_step(m, n, k, P, 'p', label, accumulated_div);\n    }\n\n    return did_parallel;\n}\n\nstd::tuple<long long, long long, long long>\nmaximum_memory(long long m, long long n, long long k, \n               int divm, int divn, int divk, int P) {\n    using dim_pair = std::tuple<long long, int, char>;\n    std::vector<dim_pair> dims = {\n      std::make_tuple(m, divm, 'B'),\n      std::make_tuple(n, divn, 'A'),\n      std::make_tuple(k, divk, 'C')\n    };\n    std::sort(dims.begin(), dims.end(),\n              [](const dim_pair& a, const dim_pair& b) -> bool {\n                  return std::get<0>(a) > std::get<0>(b) ||\n                         std::get<0>(a) == std::get<0>(b) && \n                         std::get<1>(a) < std::get<1>(b);\n              }\n    );\n\n    long long memory_A = 0;\n    long long memory_B = 0;\n    long long memory_C = 0;\n\n    for (std::size_t i = 0; i < dims.size(); ++i) {\n        auto& dim = dims[i];\n        auto div = std::get<1>(dim);\n        if (div > 1) {\n            auto& next_dim = dims[(i+1) % 3];\n            auto& next_next_dim = dims[(i+2) % 3];\n            auto copied_matrix_size = std::get<0>(next_dim) * std::get<0>(next_next_dim);\n            auto memory = math_utils::divide_and_round_up(copied_matrix_size * div, P);\n\n            auto label = std::get<2>(dim);\n            if (label == 'A') {\n                memory_A = memory;\n            } else if (label == 'B') {\n                memory_B = memory;\n            } else {\n                memory_C = memory;\n            }\n            P /= div;\n            std::get<0>(dim) /= div;\n        }\n    }\n    return std::make_tuple(memory_A, memory_B, memory_C);\n}\n\nlong long memory_with_buffer_optimization(std::vector<long long>& memory_A,\n                                          std::vector<long long>& memory_B,\n                                          std::vector<long long>& memory_C) {\n    long long 
total_memory = 0;\n\n    // sort (descreasingly) memory for each communication round\n    std::sort(memory_A.rbegin(), memory_A.rend());\n    std::sort(memory_B.rbegin(), memory_B.rend());\n    std::sort(memory_C.rbegin(), memory_C.rend());\n\n    // take the 2 largest elements, because the memory\n    // is reused in each communication per matrix\n    // memory for matrix A\n    if (memory_A.size() > 0) {\n        total_memory += memory_A[0];\n    }\n    if (memory_A.size() > 1) {\n        total_memory += memory_A[1];\n    }\n\n    // memory for matrix B\n    if (memory_B.size() > 0) {\n        total_memory += memory_B[0];\n    }\n    if (memory_B.size() > 1) {\n        total_memory += memory_B[1];\n    }\n\n    // memory for matrix C\n    if (memory_C.size() > 0) {\n        total_memory += memory_C[0];\n    }\n    if (memory_C.size() > 1) {\n        total_memory += memory_C[1];\n    }\n\n    return total_memory;\n}\n\nvoid Strategy::square_strategy(bool& incomplete_strategy) {\n    long long m = this->m;\n    long long n = this->n;\n    long long k = this->k;\n    int P = this->P;\n\n    // total needed memory\n    memory_used = 0;\n\n    // initial needed memory\n    long long init_memory_A, init_memory_B, init_memory_C;\n    std::tie(init_memory_A, init_memory_B, init_memory_C) = initial_memory(m, n, k, P);\n\n    // total memory required for each matrix without buffer optimization\n    std::vector<long long> memory_A = {init_memory_A};\n    std::vector<long long> memory_B = {init_memory_B};\n    std::vector<long long> memory_C = {init_memory_C};\n\n    for (int i = 0; i < divisors.size(); ++i) {\n        int div = divisors[i];\n\n        if (step_type[i] == 'p') {\n            if (!split_A(i)) {\n                memory_A.push_back(math_utils::divide_and_round_up(m * k * div, P));\n            } else if (!split_B(i)) {\n                memory_B.push_back(math_utils::divide_and_round_up(k * n * div, P));\n            } else {\n                
memory_C.push_back(math_utils::divide_and_round_up(m * n * div, P));\n            }\n            P /= div;\n        }\n\n        m /= divisor_m(i);\n        n /= divisor_n(i);\n        k /= divisor_k(i);\n    }\n\n\n    // if P == 1 at this point, then it means that the complete strategy was already given\n    // at the beginning, so do not try to modify it further.\n    incomplete_strategy = P > 1;\n\n    if (!incomplete_strategy) {\n        memory_used = memory_with_buffer_optimization(memory_A, memory_B, memory_C);\n\n        if (memory_limit < memory_used) {\n            throw_exception(\n                std::string(\"This multiplication requires the memory \") +\n                \"for at least \" + std::to_string(memory_used) +\n                \" units, but only \" + std::to_string(memory_limit) +\n                \" units are allowed. Either increase the memory limit \" +\n                \"or change the strategy by using more sequential \" + \"steps.\");\n        }\n        return;\n    }\n\n    int divm, divn, divk;\n    std::tie(divm, divn, divk) = \n        math_utils::balanced_divisors(m, n, k, P, get_min_dim_size());\n\n    long long additional_memory_A = 0;\n    long long additional_memory_B = 0;\n    long long additional_memory_C = 0;\n\n    std::tie(additional_memory_A, additional_memory_B, additional_memory_C) \n        = maximum_memory(m, n, k, divm, divn, divk, P);\n\n    std::vector<long long> new_memory_A = memory_A;\n    std::vector<long long> new_memory_B = memory_B;\n    std::vector<long long> new_memory_C = memory_C;\n\n    new_memory_A.push_back(additional_memory_A);\n    new_memory_B.push_back(additional_memory_B);\n    new_memory_C.push_back(additional_memory_C);\n\n    // if not enough memory for all of the proposed parallel steps\n    // then perform a single sequential step and recompute again\n    // best divm, divn, divk for the smaller problem\n    long long used =\n        memory_with_buffer_optimization(new_memory_A,\n            
                            new_memory_B,\n                                        new_memory_C);\n    while (used > memory_limit) {\n        int div = 2;\n        bool success = false;\n\n        if (m >= std::max(k, n)) {\n            // if m largest => split it\n            success = add_step(m, n, k, P, 's', 'm', div);\n        } else if (n >= std::max(m, k)) {\n            // if n largest => split it\n            success = add_step(m, n, k, P, 's', 'n', div);\n        } else {\n            // if k largest => split it\n            success = add_step(m, n, k, P, 's', 'k', div);\n        }\n\n        if (!success) {\n            throw_exception(std::string(\"Not enough memory for this strategy. \")\n                  + \"Either decrease the min_dim_size in the strategy \"\n                  + \"to allow dimensions to be further split OR \"\n                  + \"increase the memory limit in the strategy \"\n                  + \"to allow COSMA to use more memory.\");\n        }\n\n        std::tie(divm, divn, divk) = \n            math_utils::balanced_divisors(m, n, k, P, get_min_dim_size());\n\n        std::tie(additional_memory_A, additional_memory_B, additional_memory_C) \n            = maximum_memory(m, n, k, divm, divn, divk, P);\n\n        new_memory_A = memory_A;\n        new_memory_B = memory_B;\n        new_memory_C = memory_C;\n\n        new_memory_A.push_back(additional_memory_A);\n        new_memory_B.push_back(additional_memory_B);\n        new_memory_C.push_back(additional_memory_C);\n        used =\n            memory_with_buffer_optimization(new_memory_A,\n                                            new_memory_B,\n                                            new_memory_C);\n    }\n\n    memory_used = used;\n\n    P = divm * divn * divk;\n\n    // find prime factors of divm, divn, divk\n    std::vector<int> divm_factors = math_utils::decompose(divm);\n    std::vector<int> divn_factors = math_utils::decompose(divn);\n    std::vector<int> divk_factors 
= math_utils::decompose(divk);\n\n    int mi, ni, ki;\n    mi = ni = ki = 0;\n\n    int total_divisors =\n        divm_factors.size() + divn_factors.size() + divk_factors.size();\n\n    // Iterate through all prime factors of divm, divn and divk and\n    // divide each dimension with corresponding prime factors as long\n    // as that dimension is the largest one.\n    // Instead of dividing immediately m/divm, n/divn and k/divk,\n    // it's always better to divide the dimension with smaller factors first\n    // that are large enough to make that dimension NOT be the largest one\n    // after division\n    while (mi + ni + ki < total_divisors) {\n        int i = mi + ni + ki;\n        bool did_parallel = false;\n\n        long long mm = mi >= divm_factors.size() ? 1 : m;\n        long long nn = ni >= divn_factors.size() ? 1 : n;\n        long long kk = ki >= divk_factors.size() ? 1 : k;\n\n        if (mm >= std::max(nn, kk)) {\n            did_parallel =\n                divide(divm_factors, mi, m, n, k, P, 'm');\n            if (did_parallel)\n                continue;\n        }\n\n        if (nn >= std::max(mm, kk)) {\n            did_parallel =\n                divide(divn_factors, ni, m, n, k, P, 'n');\n            if (did_parallel)\n                continue;\n        }\n\n        if (kk >= std::max(mm, nn)) {\n            did_parallel =\n                divide(divk_factors, ki, m, n, k, P, 'k');\n            if (did_parallel)\n                continue;\n        }\n\n        if (!did_parallel) {\n            throw_exception(std::string(\"Not enough memory for this strategy. 
\")\n                  + \"Either decrease the min_dim_size in the strategy \"\n                  + \"to allow dimensions to be further split OR \"\n                  + \"increase the memory limit in the strategy \"\n                  + \"to allow COSMA to use more memory.\");\n        }\n    }\n\n    std::string step_type_shorter = \"\";\n    std::string split_dimension_shorter = \"\";\n    std::vector<int> divisors_shorter;\n    this->P = 1;\n\n    for (int i = 0; i < divisors.size(); ++i) {\n        if (step_type[i] == 'p') {\n            int div = divisors[i];\n            while (i + 1 < divisors.size() && step_type[i + 1] == 'p' &&\n                   split_dimension[i + 1] == split_dimension[i]) {\n                div *= divisors[i + 1];\n                i++;\n            }\n            step_type_shorter += \"p\";\n            split_dimension_shorter += split_dimension[i];\n            divisors_shorter.push_back(div);\n            this->P *= div;\n            continue;\n        }\n\n        int j = i;\n        int divm = 1;\n        int divn = 1;\n        int divk = 1;\n\n        while (step_type[j] == 's') {\n            if (split_dimension[j] == 'm')\n                divm *= divisors[j];\n            else if (split_dimension[j] == 'n')\n                divn *= divisors[j];\n            else\n                divk *= divisors[j];\n            j++;\n        }\n\n        if (divm > 1) {\n            split_dimension_shorter += \"m\";\n            step_type_shorter += \"s\";\n            divisors_shorter.push_back(divm);\n        }\n        if (divn > 1) {\n            split_dimension_shorter += \"n\";\n            step_type_shorter += \"s\";\n            divisors_shorter.push_back(divn);\n        }\n        if (divk > 1) {\n            split_dimension_shorter += \"k\";\n            step_type_shorter += \"s\";\n            divisors_shorter.push_back(divk);\n        }\n\n        i = j - 1;\n    }\n\n    split_dimension = split_dimension_shorter;\n    step_type = 
step_type_shorter;\n    divisors = divisors_shorter;\n}\n\nvoid Strategy::throw_exception(const std::string &message) {\n    std::cout << \"Splitting strategy not well defined.\\n\";\n    std::cout << message << std::endl;\n    std::cout << *this << std::endl;\n    throw std::runtime_error(message);\n}\n\nbool Strategy::split_m(size_t i) const { return split_dimension[i] == 'm'; }\n\nbool Strategy::split_n(size_t i) const { return split_dimension[i] == 'n'; }\n\nbool Strategy::split_k(size_t i) const { return split_dimension[i] == 'k'; }\n\nbool Strategy::split_A(size_t i) const { return split_m(i) || split_k(i); }\n\nbool Strategy::split_B(size_t i) const { return split_k(i) || split_n(i); }\n\nbool Strategy::split_C(size_t i) const { return split_m(i) || split_n(i); }\n\nbool Strategy::split(char label, size_t step) const {\n    if (label == 'A')\n        return split_A(step);\n    else if (label == 'B')\n        return split_B(step);\n    else\n        return split_C(step);\n}\n\nbool Strategy::sequential_step(size_t i) const { return step_type[i] == 's'; }\n\nbool Strategy::parallel_step(size_t i) const { return step_type[i] == 'p'; }\n\nint Strategy::divisor(size_t i) const { return divisors[i]; }\n\nint Strategy::divisor_m(size_t i) const { return split_m(i) ? divisors[i] : 1; }\n\nint Strategy::divisor_n(size_t i) const { return split_n(i) ? divisors[i] : 1; }\n\nint Strategy::divisor_k(size_t i) const { return split_k(i) ? 
divisors[i] : 1; }\n\nint Strategy::divisor_row(char matrix, size_t i) const {\n    if (matrix == 'A')\n        return divisor_m(i);\n    if (matrix == 'B')\n        return divisor_k(i);\n    if (matrix == 'C')\n        return divisor_m(i);\n    return 1;\n}\n\nint Strategy::divisor_col(char matrix, size_t i) const {\n    if (matrix == 'A')\n        return divisor_k(i);\n    if (matrix == 'B')\n        return divisor_n(i);\n    if (matrix == 'C')\n        return divisor_n(i);\n    return 1;\n}\n\nbool Strategy::final_step(size_t i) const { return i == n_steps(); }\n\nint Strategy::parallel_steps_before_gemm(char label) const {\n    if (label == 'A')\n        return n_parallel_steps_before_gemm_a;\n    if (label == 'B')\n        return n_parallel_steps_before_gemm_b;\n    if (label == 'C')\n        return n_parallel_steps_before_gemm_c;\n    return -1;\n}\n\n// checks if the strategy is well-defined\nvoid Strategy::check_if_valid() {\n#ifdef DEBUG\n    std::cout << \"Checking if the following strategy is valid: \" << std::endl;\n    std::cout << *this << std::endl;\n#endif\n    if (empty() && P != 1) {\n        throw_exception(\"Strategy empty but number of ranks P != 1\");\n    }\n\n    int mi = m;\n    int ni = n;\n    int ki = k;\n    int Pi = P;\n\n    n_parallel_steps = 0;\n    n_parallel_steps_before_gemm_a = 0;\n    n_parallel_steps_before_gemm_b = 0;\n    n_parallel_steps_before_gemm_c = 0;\n\n    int P_a = 1;\n    int P_b = 1;\n    int P_c = 1;\n\n    for (size_t i = 0; i < n_steps(); ++i) {\n        if (divisors[i] <= 1) {\n            throw_exception(\n                std::string(\"Divisors in each step must be larger than 1.\") +\n                \"Divisor in step \" + std::to_string(i) + \" = \" +\n                std::to_string(divisors[i]) + \".\");\n        }\n\n        if (split_dimension[i] != 'm' && split_dimension[i] != 'n' &&\n            split_dimension[i] != 'k') {\n            throw_exception(\"Split dimension in each step must be m, n or 
k\");\n        }\n\n        if (step_type[i] != 'p' && step_type[i] != 's') {\n            throw_exception(\"Step type should be either p or s.\");\n        }\n\n        if (step_type[i] == 'p') {\n            n_parallel_steps++;\n            if (!split_A(i)) {\n                n_parallel_steps_before_gemm_a++;\n            }\n            if (!split_B(i)) {\n                n_parallel_steps_before_gemm_b++;\n            }\n            if (!split_C(i)) {\n                n_parallel_steps_before_gemm_c++;\n            }\n\n            if (Pi <= 1) {\n                throw_exception(\n                    std::string(\n                        \"Not enough processors for this division strategy.\") +\n                    \"The product of all divisors in a parallel step should be \"\n                    \"equal \" +\n                    \"to the number of processors\");\n            }\n\n            if (Pi % divisors[i] != 0) {\n                throw_exception(std::string(\"The number of processors left in \"\n                                            \"each parallel step \") +\n                                \"should be divisible by divisor.\");\n            }\n\n            Pi /= divisors[i];\n        } else {\n            n_sequential_steps++;\n            if (split_A(i)) {\n                n_parallel_steps_before_gemm_a = 0;\n            }\n            if (split_B(i)) {\n                n_parallel_steps_before_gemm_b = 0;\n            }\n            if (split_C(i)) {\n                n_parallel_steps_before_gemm_c = 0;\n            }\n        }\n\n        if (step_type[i] == 'p') {\n            if (!split_A(i)) {\n                P_a *= divisors[i];\n            } else if (!split_B(i)) {\n                P_b *= divisors[i];\n            } else if (!split_C(i)) {\n                P_c *= divisors[i];\n            } else {\n                throw_exception(\"Invalid strategy: In each step, some matrix has to be split.\");\n            }\n        }\n\n        if 
(split_dimension[i] == 'm') {\n            mi /= divisors[i];\n        } else if (split_dimension[i] == 'n') {\n            ni /= divisors[i];\n        } else if (split_dimension[i] == 'k') {\n            ki /= divisors[i];\n        } else {\n            throw_exception(\"Unknown splitting dimension, should be m, n or k\");\n        }\n\n        // if last step, check if #columns >= #processors that share this block\n        // of matrix we only check dimensions n and k, because these are the\n        // dimensions defining the number of columns, i.e. dimension m does not\n        // denote the #columns of any matrix\n        if (i == n_steps() - 1) {\n            // since we are using column major ordering, the #columns of each\n            // matrix must be at least the number of processors left at that\n            // step\n            if (ki < P_a) {\n                throw_exception(std::string(\"Dimension k at step \") +\n                                std::to_string(i) + \" = \" +\n                                std::to_string(ki) +\n                                \", which is less than the number of \"\n                                \"processors left = \" +\n                                std::to_string(P_a));\n            }\n            if (ni < std::max(P_b, P_c)) {\n                throw_exception(std::string(\"Dimension n at step \") +\n                                std::to_string(i) + \" = \" +\n                                std::to_string(ni) +\n                                \", which is less than the number of \"\n                                \"processors left = \" +\n                                std::to_string(std::min(P_b, P_c)));\n            }\n        }\n    }\n    if (Pi != 1) {\n        throw_exception(\n            std::string(\n                \"Too many processors. 
The number of processors should be \") +\n            \"equal to the product of divisors in all parallel steps.\");\n    }\n\n    /*\n    memory_used = required_memory(*this);\n    // check if we have enough memory for this splitting strategy\n    if (memory_limit < memory_used) {\n        throw_exception(\"The splitting strategy requires memory \\\n                         for roughly \" + std::to_string(memory_used) + \" elements, \\\n                         but the memory limit is only \" + std::to_string(memory_limit) + \" elements. \\\n                         Either increase the memory limit or change the strategy. \\\n                         (Hint: you could use some sequential steps to spare some memory!)\");\n    }\n    */\n}\n\nvoid Strategy::compress_steps() {\n    int p_divm = 1;\n    int p_divn = 1;\n    int p_divk = 1;\n    int s_divm = 1;\n    int s_divn = 1;\n    int s_divk = 1;\n\n    for (size_t i = 0; i < split_dimension.size(); ++i) {\n        if (parallel_step(i)) {\n            p_divm *= divisor_m(i);\n            p_divn *= divisor_n(i);\n            p_divk *= divisor_k(i);\n        } else {\n            s_divm *= divisor_m(i);\n            s_divn *= divisor_n(i);\n            s_divk *= divisor_k(i);\n        }\n    }\n\n    std::vector<int> divs = {p_divm, p_divn, p_divk, s_divm, s_divn, s_divk};\n\n    divisors = std::vector<int>();\n    split_dimension = \"\";\n    step_type = \"\";\n\n    for (size_t i = 0; i < divs.size(); ++i) {\n        if (divs[i] > 1) {\n            divisors.push_back(divs[i]);\n\n            if (i < 3) {\n                step_type += \"p\";\n            } else {\n                step_type += \"s\";\n            }\n\n            if (i % 3 == 0) {\n                split_dimension += \"m\";\n            } else if (i % 3 == 1) {\n                split_dimension += \"n\";\n            } else {\n                split_dimension += \"k\";\n            }\n        }\n    }\n}\n\nvoid Strategy::compute_min_sizes() {\n    
min_m = m;\n    min_n = n;\n    min_k = k;\n    for (int step = 0; step < n_steps(); ++step) {\n        min_m /= divisor_m(step);\n        min_n /= divisor_n(step);\n        min_k /= divisor_k(step);\n    }\n}\n\nbool Strategy::should_overlap_comm_and_comp(int step) const {\n    bool last_step = step == n_steps() - 1;\n    if (!last_step) {\n        return false;\n    }\n\n    int div = divisor(step);\n    int divm = divisor_m(step);\n    int divn = divisor_n(step);\n    int divk = divisor_k(step);\n\n    int newm = min_m;\n    int newn = min_n;\n    int newk = min_k;\n\n    // overlap requires that the number of columns of the expanded matrix\n    // i.e. the matrix that is not split is >= div, so that it can be split as\n    // well\n    bool overlap_possible = (split_m(step) && min_n >= div) ||\n                            (split_n(step) && min_k >= div) ||\n                            (split_k(step) && min_n >= div);\n\n    if (split_m(step)) {\n        newn /= div;\n    } else if (split_n(step)) {\n        newk /= div;\n    } else {\n        newn /= div;\n    }\n\n    bool overlap_turned_on = overlap_comm_and_comp;\n    double score_no_overlap = math_utils::square_score(min_m, min_n, min_k);\n    double score_with_overlap = math_utils::square_score(newm, newn, newk);\n    auto diff = score_with_overlap - score_no_overlap;\n    bool should_overlap = diff / score_no_overlap >= 0.5;\n\n    // std::cout << \"overlap_possible = \" << overlap_possible << std::endl;\n    // std::cout << \"last_step = \" << last_step << std::endl;\n    // std::cout << \"overlap_turned_on = \" << overlap_turned_on << std::endl;\n    // std::cout << \"score_no_overlap = \" << score_no_overlap << std::endl;\n    // std::cout << \"score_with_overlap = \" << score_with_overlap << std::endl;\n    // std::cout << \"should_overlap = \" << should_overlap << std::endl;\n    bool condition = overlap_possible && overlap_turned_on && should_overlap;\n\n#ifdef DEBUG\n    std::cout << \"Overlapping 
communication and computation.\" << std::endl;\n#endif\n\n    // return condition;\n    return condition;\n}\n\nbool Strategy::empty() const {\n    return n_steps() == 0;\n}\n\nint Strategy::n_rows(char label) const {\n    if (label == 'A') \n        return m;\n    if (label == 'B') \n        return k;\n    if (label == 'C') \n        return m;\n    return -1;\n}\n\nint Strategy::n_cols(char label) const {\n    if (label == 'A') \n        return k;\n    if (label == 'B') \n        return n;\n    if (label == 'C') \n        return n;\n\n    return -1;\n}\n\n// enables overlapping and updates the value of the `irregular` variable\nvoid Strategy::enable_overlapping_comm_and_comp() {\n    int last_step = n_steps() - 1;\n\n    // if comm and comp are overlapped, then in the last step\n    // the #columns of the matrix which was not split in that step\n    // are being split by the same divisor to allow the overlap\n    if (split_m(last_step) && min_n >= divisor_m(last_step)) {\n        // overlap only possible if min_n >= divisor_m(last_step)\n        overlap_comm_and_comp = true;\n        if (overlap_comm_and_comp) {\n            // if m is split, then B is not split and thus min_n is also split\n            irregular = irregular || (min_n % divisor_m(last_step) != 0);\n        }\n    } else if (split_n(last_step) && min_k >= divisor_n(last_step)) {\n        overlap_comm_and_comp = true;\n        if (overlap_comm_and_comp) {\n            // if n is split, then A is not split and thus min_k is also split\n            irregular = irregular || (min_k % divisor_n(last_step) != 0);\n        }\n    } else if (split_k(last_step) && min_n >= divisor_k(last_step)) {\n        overlap_comm_and_comp = true;\n        if (overlap_comm_and_comp) {\n            // if k is split, then C is not split and thus min_n is also split\n            irregular = irregular || (min_n % divisor_k(last_step) != 0);\n        }\n    }\n}\n\n// the strategy is considered irregular if any dimension\n// 
(at any step) is divided by a divisor that does not perfectly\n// divide that dimension\nvoid Strategy::check_if_irregular() {\n    int mm = m;\n    int nn = n;\n    int kk = k;\n    for (int i = 0; i < n_steps(); ++i) {\n        if (mm % divisor_m(i) != 0) {\n            irregular = true;\n            return;\n        }\n        if (nn % divisor_n(i) != 0) {\n            irregular = true;\n            return;\n        }\n        if (kk % divisor_k(i) != 0) {\n            irregular = true;\n            return;\n        }\n        mm /= divisor_m(i);\n        nn /= divisor_n(i);\n        kk /= divisor_k(i);\n    }\n    irregular = false;\n}\n\nstd::ostream &operator<<(std::ostream &os, const Strategy &other) {\n    os << \"Matrix dimensions (m, n, k) = (\" << other.m << \", \" << other.n\n       << \", \" << other.k << \")\\n\";\n    os << \"Number of processors: \" << other.P << \"\\n\";\n    if (other.topology) {\n        os << \"Communication-aware topology turned on.\\n\";\n    }\n    if (other.overlap_comm_and_comp) {\n        os << \"Overlap of communication and computation: ON.\\n\";\n        if (other.use_busy_waiting) {\n            os << \"Communication-thread policy (for overlap): \"\n               << \"busy-waiting (using blocking one-sided MPI).\\n\";\n        } else {\n            os << \"Communication-thread policy (for overlap): \"\n               << \"polling (using non-blocking one-sided MPI).\\n\";\n        }\n    } else {\n        os << \"Overlap of communication and computation: OFF.\\n\";\n    }\n    os << \"Divisions strategy: \\n\";\n    for (size_t i = 0; i < other.n_steps(); ++i) {\n        if (other.step_type[i] == 'p') {\n            os << \"parallel (\" << other.split_dimension[i] << \" / \"\n               << other.divisors[i] << \")\\n\";\n        } else {\n            os << \"sequential (\" << other.split_dimension[i] << \" / \"\n               << other.divisors[i] << \")\\n\";\n        }\n    }\n    os << \"Required memory per rank 
(in #elements): \" << other.memory_used\n       << \"\\n\";\n    os << \"Available memory per rank (in #elements): \";\n    if (other.memory_limit < std::numeric_limits<long long>::max())\n        os << other.memory_limit;\n    else\n        os << \"not specified (assumed: infinite)\";\n    os << \"\\n\";\n    return os;\n}\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/strategy.hpp",
    "content": "#pragma once\n\n#include <unordered_map>\n#include <iostream>\n#include <limits>\n#include <math.h>\n#include <sstream>\n#include <stdexcept>\n#include <string>\n#include <tuple>\n#include <vector>\n\n#include <cosma/math_utils.hpp>\n\nnamespace cosma {\nclass Strategy {\n  public:\n    // matrix dimensions\n    int m = 0;\n    int n = 0;\n    int k = 0;\n    // number of processors\n    size_t P = 0;\n\n    long long memory_limit = 0;\n\n    // minimum problem size per rank\n    // the total number of ranks will be reduced\n    // if the problem size per rank is too small\n    // by default = 200\n    int get_min_dim_size();\n\n    // the actual minimum problem size\n    // that is induced by given strategy\n    int min_m = 0;\n    int min_n = 0;\n    int min_k = 0;\n\n    // stores the divisor in each step of the algorithm\n    std::vector<int> divisors = {};\n    // returns m, n or k character depending on\n    // which dimension was split in each step\n    std::string split_dimension = \"\";\n    // describes whether a sequential step (s) or a parallel step (p) is used in\n    // each step\n    std::string step_type = \"\";\n    // if true, MPI will try to relabel ranks such that\n    // the ranks which communicate are physically close to each other\n    bool topology = false;\n    // if true, uses busy waiting in the thread performing MPI communication\n    // otherwise, uses polling to query if the communication request has\n    // completed\n    bool use_busy_waiting = true;\n    long long memory_used = 0;\n    int n_parallel_steps = 0;\n    int n_sequential_steps = 0;\n\n    int n_parallel_steps_before_gemm_a = 0;\n    int n_parallel_steps_before_gemm_b = 0;\n    int n_parallel_steps_before_gemm_c = 0;\n\n    // constructors\n    Strategy();\n    // copy constructor\n    Strategy(Strategy &other);\n    Strategy(const Strategy &other);\n\n    // Strategy& operator=(const Strategy& other) = default;\n    // Strategy& operator=(Strategy& other) 
= default;\n\n    Strategy(int mm,\n             int nn,\n             int kk,\n             size_t PP,\n             std::vector<int> &divs,\n             std::string &dims,\n             std::string &types,\n             long long mem_limit = std::numeric_limits<long long>::max(),\n             bool top = false,\n             bool overlap = false,\n             bool busy_waiting = true);\n\n    Strategy(int mm,\n             int nn,\n             int kk,\n             size_t PP,\n             long long mem_limit = std::numeric_limits<long long>::max(),\n             bool top = false,\n             bool overlap = false,\n             bool busy_waiting = true);\n\n    // number of steps of the algorithm\n    size_t n_steps() const;\n\n    // strategy that tries to make each base case as square as possible\n    // it always uses all the resources (all P available ranks) but tries to\n    // find divm, divn and divk such that divm * divn * divk = P and m/divm =\n    // n/divn = k/divk. if there is not enough memory in some step, then a\n    // sequential step is performed and new divm, divn and divk are found that\n    // correspond to the new subproblem.\n    void square_strategy(bool& should_optimize);\n\n    bool add_step(long long& prev_m, long long& prev_n, long long& prev_k,\n                  int& prev_P, char step, char dim_label, int divisor);\n\n    void throw_exception(const std::string &message);\n\n    bool split_m(size_t i) const;\n    bool split_n(size_t i) const;\n    bool split_k(size_t i) const;\n\n    bool split_A(size_t i) const;\n    bool split_B(size_t i) const;\n    bool split_C(size_t i) const;\n    bool split(char label, size_t i) const;\n\n    bool sequential_step(size_t i) const;\n    bool parallel_step(size_t i) const;\n\n    int divisor(size_t i) const;\n    int divisor_m(size_t i) const;\n    int divisor_n(size_t i) const;\n    int divisor_k(size_t i) const;\n\n    int divisor_row(char matrix, size_t i) const;\n    int divisor_col(char 
matrix, size_t i) const;\n\n    bool final_step(size_t i) const;\n    int parallel_steps_before_gemm(char label) const;\n\n    static std::tuple<long long, long long, long long>\n    initial_memory(long long m, long long n, long long k, int P);\n\n    // checks if the strategy is well-defined\n    void check_if_valid();\n    void check_if_overlap_possible();\n    // prefers a single division by (a*b) over two divisions (one by a and one\n    // by b)\n    void compress_steps();\n\n    bool should_overlap_comm_and_comp(int step) const;\n\n    bool operator==(const Strategy &other) const;\n    bool operator!=(const Strategy &other) const;\n\n    friend std::ostream &operator<<(std::ostream &os, const Strategy &other);\n\n    void compute_min_sizes();\n\n    // if number of processes is 0, then n_steps = 0\n    // and then the strategy is considered empty\n    bool empty() const;\n\n    // returns dimensions of a matrix with given label\n    // where label = A, B or C\n    int n_rows(char label) const;\n    int n_cols(char label) const;\n\n    void enable_overlapping_comm_and_comp();\n\n    void check_if_irregular();\n\n    // the strategy is considered irregular if any dimension\n    // (at any step) is divided by a divisor that does not perfectly\n    // divide that dimension\n    bool irregular = true;\n\n  private:\n    // if true, the communication and computation will be overlapped\n    // this variable should not be changed outside of the class but only\n    // through the function `enable_overlapping_comm_and_comp`.\n    // because this function also has to update the variable `irregular`\n    // when the overlap is turned on.\n    bool overlap_comm_and_comp = false;\n\n    bool divide(std::vector<int> &div_factors,\n                int &dim_i,\n                long long &dim1,\n                long long &dim2,\n                long long &dim3,\n                int &P,\n                const char label);\n};\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/timer.hpp",
    "content": "#pragma once\n\n#include <chrono>\n#include <iostream>\n#include <mpi.h>\n#include <string>\n\nnamespace cosma {\nclass Timer {\n  public:\n    using time_point =\n        std::chrono::time_point<std::chrono::high_resolution_clock>;\n\n    int n_rep_;\n    std::string region;\n    MPI_Comm comm_;\n    time_point start;\n\n    Timer(int n_rep, std::string reg = \"\", MPI_Comm comm = MPI_COMM_WORLD)\n        : n_rep_(n_rep)\n        , region(reg)\n        , comm_(comm) {\n        MPI_Barrier(comm);\n        start = std::chrono::high_resolution_clock::now();\n    }\n\n    ~Timer() {\n        auto finish = std::chrono::high_resolution_clock::now();\n        std::chrono::duration<double> elapsed = finish - start;\n        auto time =\n            std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)\n                .count();\n        long long max_time, min_time, sum_time;\n        MPI_Reduce(&time, &max_time, 1, MPI_LONG_LONG, MPI_MAX, 0, comm_);\n        MPI_Reduce(&time, &min_time, 1, MPI_LONG_LONG, MPI_MIN, 0, comm_);\n        MPI_Reduce(&time, &sum_time, 1, MPI_LONG_LONG, MPI_SUM, 0, comm_);\n        int rank, size;\n        MPI_Comm_rank(comm_, &rank);\n        MPI_Comm_size(comm_, &size);\n        if (rank == 0) {\n            std::cout << region << \" MIN TIME [ms]: \" << 1.0 * min_time / n_rep_\n                      << std::endl;\n            std::cout << region << \" MAX TIME [ms]: \" << 1.0 * max_time / n_rep_\n                      << std::endl;\n            std::cout << region\n                      << \" AVG TIME [ms]: \" << 1.0 * sum_time / (n_rep_ * size)\n                      << std::endl;\n            std::cout << \"\\n\";\n        }\n    }\n};\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/two_sided_communicator.cpp",
    "content": "#include <cosma/interval.hpp>\n#include <cosma/math_utils.hpp>\n#include <cosma/matrix.hpp>\n#include <cosma/mpi_mapper.hpp>\n#include <cosma/profiler.hpp>\n#include <cosma/strategy.hpp>\n#include <cosma/two_sided_communicator.hpp>\n\n#include <mpi.h>\n\n#include <algorithm>\n#include <atomic>\n#include <chrono>\n#include <complex>\n#include <future>\n#include <iostream>\n#include <stdlib.h>\n#include <thread>\n#include <tuple>\n\nnamespace cosma {\n\nnamespace two_sided_communicator {\n// two_sided_communicator() = default;\n// two_sided_communicator(const Strategy* strategy, MPI_Comm comm):\n//     communicator::communicator(strategy, comm) {}\n\n/*\n * (first see the comment in communicator.hpp)\n * The idea is the following:\n *      - if only 1 block per rank should be communicated:\n *        don't allocate new space, just perform all-gather\n *\n *      - if more than 1 blocks per rank should be communicated:\n *        allocate new space and let the all-gather be performed\n *        on the level of all blocks per rank. 
After the communication,\n *        reshuffle the local data by putting first blocks from each rank\n * first, then all second blocks from each rank and so on.\n */\ntemplate <typename Scalar>\nvoid copy(MPI_Comm comm,\n          int rank,\n          int div,\n          Interval &P,\n          Scalar *in,\n          Scalar *out,\n          Scalar *reshuffle_buffer,\n          std::vector<std::vector<int>> &size_before,\n          std::vector<int> &total_before,\n          int total_after) {\n    PE(multiply_communication_other);\n    // int div = strategy_->divisor(step);\n    // MPI_Comm subcomm = active_comm(step);\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n\n    int relative_rank = rank - P.first();\n    int local_size = total_before[relative_rank];\n\n    int sum = 0;\n    std::vector<int> total_size(div);\n    std::vector<int> dspls(div);\n    // int off = offset(P, div);\n\n    std::vector<int> subgroup(div);\n    bool same_size = true;\n\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        int temp_size = total_before[target];\n        dspls[i] = sum;\n        sum += temp_size;\n        total_size[i] = temp_size;\n        same_size &= temp_size == local_size;\n    }\n\n    int n_blocks = size_before[relative_rank].size();\n    Scalar *receive_pointer = n_blocks > 1 ? 
reshuffle_buffer : out;\n    PL();\n\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    PE(multiply_communication_copy);\n    if (same_size) {\n        MPI_Allgather(in,\n                      local_size,\n                      mpi_type,\n                      receive_pointer,\n                      local_size,\n                      mpi_type,\n                      comm);\n    } else {\n        MPI_Allgatherv(in,\n                       local_size,\n                       mpi_type,\n                       receive_pointer,\n                       total_size.data(),\n                       dspls.data(),\n                       mpi_type,\n                       comm);\n    }\n    PL();\n\n    PE(multiply_communication_other);\n    if (n_blocks > 1) {\n        int index = 0;\n        std::vector<int> block_offset(div);\n        // order all first sequential parts of all groups first and so on..\n        for (int block = 0; block < n_blocks; block++) {\n            for (int rank = 0; rank < div; rank++) {\n                int target = P.locate_in_interval(div, rank, off);\n                int dsp = dspls[rank] + block_offset[rank];\n                int b_size = size_before[target][block];\n                std::copy(reshuffle_buffer + dsp,\n                          reshuffle_buffer + dsp + b_size,\n                          out + index);\n                index += b_size;\n                block_offset[rank] += b_size;\n            }\n        }\n    }\n    PL();\n#ifdef DEBUG\n    std::cout << \"Content of the copied matrix in rank \" << rank\n              << \" is now: \" << std::endl;\n    for (int j = 0; j < sum; j++) {\n        std::cout << out[j] << \" , \";\n    }\n    std::cout << std::endl;\n\n#endif\n}\n\ntemplate <typename Scalar>\nvoid reduce(MPI_Comm comm,\n            int rank,\n            int div,\n            Interval &P,\n            Scalar *LC, // expanded_matrix\n            Scalar *C,  // original matrix\n            Scalar *reshuffle_buffer,\n 
           Scalar *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            Scalar beta) {\n    PE(multiply_communication_other);\n    // int div = strategy_->divisor(step);\n    // MPI_Comm subcomm = active_comm(step);\n\n    std::vector<int> subgroup(div);\n\n    int gp, off;\n    std::tie(gp, off) = P.locate_in_subinterval(div, rank);\n    // int gp, off;\n    // std::tie(gp, off) = group_and_offset(P, div);\n\n    // reorder the elements as:\n    // first all blocks that should be sent to rank 0 then all blocks for\n    // rank 1 and so on...\n    int n_blocks = c_expanded[off].size();\n    std::vector<int> block_offset(n_blocks);\n    Scalar *send_pointer = n_blocks > 1 ? reshuffle_buffer : LC;\n\n    int sum = 0;\n    for (int i = 0; i < n_blocks; ++i) {\n        block_offset[i] = sum;\n        sum += c_expanded[off][i];\n    }\n\n    std::vector<int> recvcnts(div);\n\n    bool same_size = true;\n    int index = 0;\n    // go through the communication ring\n    for (int i = 0; i < div; ++i) {\n        int target = P.locate_in_interval(div, i, off);\n        recvcnts[i] = c_total_current[target];\n\n        same_size = same_size && recvcnts[i] == recvcnts[0];\n\n        if (n_blocks > 1) {\n            for (int block = 0; block < n_blocks; ++block) {\n                int b_offset = block_offset[block];\n                int b_size = c_current[target][block];\n                std::copy(LC + b_offset,\n                          LC + b_offset + b_size,\n                          reshuffle_buffer + index);\n                index += b_size;\n                block_offset[block] += b_size;\n            }\n        }\n    }\n\n    Scalar *receive_pointer = beta != Scalar{0} ? 
reduce_buffer : C;\n    PL();\n\n    auto mpi_type = mpi_mapper<Scalar>::getType();\n    PE(multiply_communication_reduce);\n\n    if (same_size) {\n        MPI_Reduce_scatter_block(send_pointer,\n                           receive_pointer,\n                           recvcnts[0],\n                           mpi_type,\n                           MPI_SUM,\n                           comm);\n    } else {\n        MPI_Reduce_scatter(send_pointer,\n                           receive_pointer,\n                           recvcnts.data(),\n                           mpi_type,\n                           MPI_SUM,\n                           comm);\n    }\n    PL();\n\n    PE(multiply_communication_other);\n    if (beta != Scalar{0}) {\n        // sum up receiving_buffer with C\n        for (int el = 0; el < recvcnts[gp]; ++el) {\n            C[el] = beta * C[el] + reduce_buffer[el];\n        }\n    }\n    PL();\n}\n\ntemplate void copy<float>(MPI_Comm comm,\n                          int rank,\n                          int div,\n                          Interval &P,\n                          float *in,\n                          float *out,\n                          float *reshuffle_buffer,\n                          std::vector<std::vector<int>> &size_before,\n                          std::vector<int> &total_before,\n                          int total_after);\n\ntemplate void copy<double>(MPI_Comm comm,\n                           int rank,\n                           int div,\n                           Interval &P,\n                           double *in,\n                           double *out,\n                           double *reshuffle_buffer,\n                           std::vector<std::vector<int>> &size_before,\n                           std::vector<int> &total_before,\n                           int total_after);\n\ntemplate void\ncopy<std::complex<float>>(MPI_Comm comm,\n                          int rank,\n                          int div,\n            
              Interval &P,\n                          std::complex<float> *in,\n                          std::complex<float> *out,\n                          std::complex<float> *reshuffle_buffer,\n                          std::vector<std::vector<int>> &size_before,\n                          std::vector<int> &total_before,\n                          int total_after);\n\ntemplate void\ncopy<std::complex<double>>(MPI_Comm comm,\n                           int rank,\n                           int div,\n                           Interval &P,\n                           std::complex<double> *in,\n                           std::complex<double> *out,\n                           std::complex<double> *reshuffle_buffer,\n                           std::vector<std::vector<int>> &size_before,\n                           std::vector<int> &total_before,\n                           int total_after);\n\ntemplate void reduce<float>(MPI_Comm comm,\n                            int rank,\n                            int div,\n                            Interval &P,\n                            float *LC,\n                            float *C,\n                            float *reshuffle_buffer,\n                            float *reduce_buffer,\n                            std::vector<std::vector<int>> &c_current,\n                            std::vector<int> &c_total_current,\n                            std::vector<std::vector<int>> &c_expanded,\n                            std::vector<int> &c_total_expanded,\n                            float beta);\n\ntemplate void reduce<double>(MPI_Comm comm,\n                             int rank,\n                             int div,\n                             Interval &P,\n                             double *LC,\n                             double *C,\n                             double *reshuffle_buffer,\n                             double *reduce_buffer,\n                             std::vector<std::vector<int>> 
&c_current,\n                             std::vector<int> &c_total_current,\n                             std::vector<std::vector<int>> &c_expanded,\n                             std::vector<int> &c_total_expanded,\n                             double beta);\n\ntemplate void\nreduce<std::complex<float>>(MPI_Comm comm,\n                            int rank,\n                            int div,\n                            Interval &P,\n                            std::complex<float> *LC,\n                            std::complex<float> *C,\n                            std::complex<float> *reshuffle_buffer,\n                            std::complex<float> *reduce_buffer,\n                            std::vector<std::vector<int>> &c_current,\n                            std::vector<int> &c_total_current,\n                            std::vector<std::vector<int>> &c_expanded,\n                            std::vector<int> &c_total_expanded,\n                            std::complex<float> beta);\n\ntemplate void\nreduce<std::complex<double>>(MPI_Comm comm,\n                             int rank,\n                             int div,\n                             Interval &P,\n                             std::complex<double> *LC,\n                             std::complex<double> *C,\n                             std::complex<double> *reshuffle_buffer,\n                             std::complex<double> *reduce_buffer,\n                             std::vector<std::vector<int>> &c_current,\n                             std::vector<int> &c_total_current,\n                             std::vector<std::vector<int>> &c_expanded,\n                             std::vector<int> &c_total_expanded,\n                             std::complex<double> beta);\n\n} // end namespace two_sided_communicator\n\n} // namespace cosma\n"
  },
  {
    "path": "src/cosma/two_sided_communicator.hpp",
    "content": "#pragma once\n\n#include <cosma/interval.hpp>\n#include <cosma/math_utils.hpp>\n#include <cosma/matrix.hpp>\n#include <cosma/strategy.hpp>\n\n#include <mpi.h>\n\n#include <vector>\n\nnamespace cosma {\n\nnamespace two_sided_communicator {\n\n/*\n * (first see the comment in communicator.hpp)\n * The idea is the following:\n *      - if only 1 block per rank should be communicated:\n *        don't allocate new space, just perform all-gather\n *\n *      - if more than 1 blocks per rank should be communicated:\n *        allocate new space and let the all-gather be performed\n *        on the level of all blocks per rank. After the communication,\n *        reshuffle the local data by putting first blocks from each rank\n * first, then all second blocks from each rank and so on.\n */\ntemplate <typename Scalar>\nvoid copy(MPI_Comm comm,\n          int rank,\n          int div,\n          Interval &P,\n          Scalar *in,\n          Scalar *out,\n          Scalar *reshuffle_buffer,\n          std::vector<std::vector<int>> &size_before,\n          std::vector<int> &total_before,\n          int total_after);\n\ntemplate <typename Scalar>\nvoid reduce(MPI_Comm comm,\n            int rank,\n            int div,\n            Interval &P,\n            Scalar *LC,\n            Scalar *C,\n            Scalar *reshuffle_buffer,\n            Scalar *reduce_buffer,\n            std::vector<std::vector<int>> &c_current,\n            std::vector<int> &c_total_current,\n            std::vector<std::vector<int>> &c_expanded,\n            std::vector<int> &c_total_expanded,\n            Scalar beta);\n\n} // namespace two_sided_communicator\n\n} // namespace cosma\n"
  },
  {
    "path": "tests/CMakeLists.txt",
    "content": "add_custom_target(tests COMMENT \"Builds all tests.\")\n\n# CACHE GTest main into an OBJECT library to avoid recompiling.\n#\nadd_library(main_gtest_mpi OBJECT main_gtest_mpi.cpp)\ntarget_link_libraries(main_gtest_mpi PRIVATE MPI::MPI_CXX gtest_mpi)\n\nadd_library(main_gtest OBJECT main_gtest.cpp)\ntarget_link_libraries(main_gtest PRIVATE gtest)\n\n#  Unit tests\n#\nfunction(add_cosma_mpi_test test_name num_procs)\n    set(tgt_ test.${test_name})\n    add_executable(${tgt_} ${test_name}.cpp)\n    target_link_libraries(${tgt_} PRIVATE main_gtest_mpi gtest_mpi ${ARGN})\n    target_include_directories(${tgt_} PRIVATE .)\n    add_test(NAME ${tgt_}\n             WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}\n             COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${num_procs}\n                     ${MPIEXEC_PREFLAGS} ./${tgt_} ${MPIEXEC_POSTFLAGS})\n    add_dependencies(tests ${tgt_})\n    install(TARGETS ${tgt_} DESTINATION \"${CMAKE_INSTALL_BINDIR}\")\nendfunction()\n\nadd_executable(test.mapper mapper.cpp)\ntarget_link_libraries(test.mapper PRIVATE main_gtest gtest cosma)\nadd_test(NAME test.mapper COMMAND test.mapper)\nadd_dependencies(tests test.mapper)\ninstall(TARGETS test.mapper DESTINATION \"${CMAKE_INSTALL_BINDIR}\")\n\nif(NOT COSMA_SCALAPACK MATCHES \"OFF\")\n    add_cosma_mpi_test(pdgemm 16 cosma_pxgemm_cpp)\nendif()\n\nadd_cosma_mpi_test(multiply_using_layout 4 cosma)\nadd_cosma_mpi_test(multiply 16 cosma)\nadd_cosma_mpi_test(scalar_matmul 8 cosma)\n"
  },
  {
    "path": "tests/main_gtest.cpp",
    "content": "#include <gtest/gtest.h>\n\nint main(int argc, char **argv) {\n    // init gtest\n    ::testing::InitGoogleTest(&argc, argv);\n    // run all tests\n    return RUN_ALL_TESTS();\n}\n"
  },
  {
    "path": "tests/main_gtest_mpi.cpp",
    "content": "#include <gtest/gtest.h>\n#include <gtest_mpi/gtest_mpi.hpp>\n\nint main(int argc, char **argv) {\n    MPI_Init(&argc, &argv);\n\n    // init gtest\n    ::testing::InitGoogleTest(&argc, argv);\n\n    // new test environment (makes a copy of MPI_COMM_WORLD)\n    ::testing::AddGlobalTestEnvironment(new gtest_mpi::MPITestEnvironment());\n\n    auto &test_listeners = ::testing::UnitTest::GetInstance()->listeners();\n\n    // replace the default listener with the custom one\n    delete test_listeners.Release(test_listeners.default_result_printer());\n    test_listeners.Append(new gtest_mpi::PrettyMPIUnitTestResultPrinter());\n\n    // run all tests\n    auto exit_code = RUN_ALL_TESTS();\n\n    MPI_Finalize();\n\n    return exit_code;\n}\n"
  },
  {
    "path": "tests/mapper.cpp",
    "content": "#include <cosma/mapper.hpp>\n\n#include <gtest/gtest.h>\n\nusing namespace cosma;\nTEST(mapper, rpa_256) {\n    int k = 13893632;\n    int m = 34816;\n    int n = 34816;\n    int nodes = 1024;\n    int ranks_per_node = 1;\n    int P = nodes * ranks_per_node;\n    // memory_limit /= ranks_per_node;\n    //\n    std::string split_dimension = \"kmkm\";\n    std::string step_type = \"sspp\";\n    std::vector<int> divisors = {2, 2, 512, 2};\n\n    Strategy strategy(m, n, k, P, divisors, split_dimension, step_type);\n    std::cout << strategy << std::endl;\n\n    for (int rank = 0; rank < P; ++rank) {\n        EXPECT_NO_THROW(Mapper mapper_a('A', strategy, rank));\n        EXPECT_NO_THROW(Mapper mapper_b('B', strategy, rank));\n        EXPECT_NO_THROW(Mapper mapper_c('C', strategy, rank));\n\n        Mapper mapper_a('A', strategy, rank);\n        Mapper mapper_b('B', strategy, rank);\n        Mapper mapper_c('C', strategy, rank);\n\n        EXPECT_NO_THROW(auto cosma_grid_a = mapper_a.get_layout_grid());\n        EXPECT_NO_THROW(auto cosma_grid_b = mapper_b.get_layout_grid());\n        EXPECT_NO_THROW(auto cosma_grid_c = mapper_c.get_layout_grid());\n    }\n}\n\nTEST(strategy, rpa_128) {\n    int m = 17408;\n    int n = 17408;\n    int k = 3473408;\n    long long memory_limit = 52428800; // #elements per rank\n    int nodes = 128;\n    int ranks_per_node = 36;\n    int P = nodes * ranks_per_node;\n    // memory_limit /= ranks_per_node;\n\n    // Strategy::min_dim_size = 32;\n    Strategy strategy(m, n, k, P, memory_limit);\n\n    std::cout << \"Strategy = \" << strategy << std::endl;\n    std::cout << \"n seq steps = \" << strategy.n_sequential_steps << std::endl;\n\n    EXPECT_TRUE(strategy.n_sequential_steps > 0);\n\n    EXPECT_EQ(strategy.split_dimension, \"kkmnkmnkn\");\n\n    EXPECT_EQ(strategy.step_type, \"spppppppp\");\n\n    std::vector<int> target_divisors = {16, 16, 2, 2, 2, 3, 2, 3, 2};\n    EXPECT_EQ(strategy.divisors, 
target_divisors);\n}\n\nTEST(strategy, nested_sequential_parallel) {\n    int m = 30000;\n    int n = m;\n    int k = m;\n    long long memory_limit =\n        80000000; // #elements, per node, corresponding to 50GB\n    int nodes = 10;\n    int ranks_per_node = 36;\n    int P = nodes * ranks_per_node;\n    // memory_limit /= ranks_per_node;\n\n    // Strategy::min_dim_size = 32;\n    Strategy strategy(m, n, k, P, memory_limit);\n\n    std::cout << \"Strategy = \" << strategy << std::endl;\n    std::cout << \"n seq steps = \" << strategy.n_sequential_steps << std::endl;\n    // EXPECT_TRUE(strategy.n_seq_steps > 0);\n}\n\nTEST(mapper, bdb) {\n    auto m = 8u;\n    auto n = 4u;\n    auto k = 2u;\n    std::string types = \"psp\";\n    std::vector<int> divisors = {2, 2, 2};\n    std::string dims = \"mmn\";\n    auto P = 4u;\n\n    Strategy strategy(m, n, k, P, divisors, dims, types);\n\n    // the last element is rank = 0, but regardless of this parameter\n    // the mapper can compute the buffers sizes or the mapper for any rank\n    Mapper A('A', strategy, 0);\n    Mapper B('B', strategy, 0);\n    Mapper C('C', strategy, 0);\n\n    std::cout << \"A = \" << A.P() << std::endl;\n\n    // test initial sizes for all ranks\n    std::vector<int> A_initial_size_target = {4, 4, 4, 4};\n    std::vector<int> B_initial_size_target = {2, 2, 2, 2};\n    std::vector<int> C_initial_size_target = {8, 8, 8, 8};\n\n    for (auto i = 0u; i < P; ++i) {\n        EXPECT_EQ(A.initial_size(i), A_initial_size_target[i]);\n        EXPECT_EQ(B.initial_size(i), B_initial_size_target[i]);\n        EXPECT_EQ(C.initial_size(i), C_initial_size_target[i]);\n    }\n    // check if the precomputed local->global mapper for\n    // local elements on the current rank match\n    // the results from the on-demand computed local->global\n    // mapper for any element and any rank\n    // we only check if this holds for rank 0\n    for (auto i = 0u; i < A_initial_size_target[0]; ++i) {\n        int gi, 
gj;\n        std::tie(gi, gj) = A.global_coordinates(i, 0);\n        int gi_local, gj_local;\n        std::tie(gi_local, gj_local) = A.global_coordinates(i);\n        EXPECT_EQ(gi, gi_local);\n        EXPECT_EQ(gj, gj_local);\n    }\n\n    for (auto i = 0u; i < B_initial_size_target[0]; ++i) {\n        int gi, gj;\n        std::tie(gi, gj) = B.global_coordinates(i, 0);\n        int gi_local, gj_local;\n        std::tie(gi_local, gj_local) = B.global_coordinates(i);\n        EXPECT_EQ(gi, gi_local);\n        EXPECT_EQ(gj, gj_local);\n    }\n\n    for (auto i = 0u; i < C_initial_size_target[0]; ++i) {\n        int gi, gj;\n        std::tie(gi, gj) = C.global_coordinates(i, 0);\n        int gi_local, gj_local;\n        std::tie(gi_local, gj_local) = C.global_coordinates(i);\n        EXPECT_EQ(gi, gi_local);\n        EXPECT_EQ(gj, gj_local);\n    }\n\n    // test rank_to_range map which specified for each rank, the list of\n    // Interval2D it owns\n    std::vector<std::vector<Interval2D>> A_rank_to_range_target = {\n        {Interval2D(0, 1, 0, 0), Interval2D(2, 3, 0, 0)},\n        {Interval2D(0, 1, 1, 1), Interval2D(2, 3, 1, 1)},\n        {Interval2D(4, 5, 0, 0), Interval2D(6, 7, 0, 0)},\n        {Interval2D(4, 5, 1, 1), Interval2D(6, 7, 1, 1)}};\n\n    std::vector<Interval2D> B_rank_to_range_target = {Interval2D(0, 1, 0, 0),\n                                                      Interval2D(0, 1, 2, 2),\n                                                      Interval2D(0, 1, 1, 1),\n                                                      Interval2D(0, 1, 3, 3)};\n\n    std::vector<std::vector<Interval2D>> C_rank_to_range_target = {\n        {Interval2D(0, 1, 0, 1), Interval2D(2, 3, 0, 1)},\n        {Interval2D(0, 1, 2, 3), Interval2D(2, 3, 2, 3)},\n        {Interval2D(4, 5, 0, 1), Interval2D(6, 7, 0, 1)},\n        {Interval2D(4, 5, 2, 3), Interval2D(6, 7, 2, 3)}};\n\n    auto A_rank_to_range = A.complete_layout();\n    auto B_rank_to_range = B.complete_layout();\n    
auto C_rank_to_range = C.complete_layout();\n\n    for (auto i = 0u; i < P; ++i) {\n        EXPECT_EQ(A_rank_to_range[i].size(), 2);\n        EXPECT_EQ(B_rank_to_range[i].size(), 1);\n        EXPECT_EQ(C_rank_to_range[i].size(), 2);\n\n        for (auto range = 0u; range < A_rank_to_range[i].size(); ++range) {\n            EXPECT_EQ(A_rank_to_range[i][range],\n                      A_rank_to_range_target[i][range]);\n        }\n\n        EXPECT_EQ(B_rank_to_range[i][0], B_rank_to_range_target[i]);\n\n        for (auto range = 0u; range < C_rank_to_range[i].size(); ++range) {\n            EXPECT_EQ(C_rank_to_range[i][range],\n                      C_rank_to_range_target[i][range]);\n        }\n    }\n\n    // test the mapping global<->local\n    std::vector<std::pair<int, int>> A_global_coord = {{0, 0},\n                                                       {1, 0},\n                                                       {2, 0},\n                                                       {3, 0},\n                                                       {0, 1},\n                                                       {1, 1},\n                                                       {2, 1},\n                                                       {3, 1},\n                                                       {4, 0},\n                                                       {5, 0},\n                                                       {6, 0},\n                                                       {7, 0},\n                                                       {4, 1},\n                                                       {5, 1},\n                                                       {6, 1},\n                                                       {7, 1}};\n\n    std::vector<std::pair<int, int>> B_global_coord = {\n        {0, 0}, {1, 0}, {0, 2}, {1, 2}, {0, 1}, {1, 1}, {0, 3}, {1, 3}};\n\n    auto i = 0u;\n    for (auto rank = 0u; rank < P; ++rank) {\n        for (auto locIdx 
= 0u; locIdx < A_initial_size_target[rank]; ++locIdx) {\n            int gi, gj;\n            std::tie(gi, gj) = A.global_coordinates(locIdx, rank);\n            EXPECT_EQ(A_global_coord[i].first, gi);\n            EXPECT_EQ(A_global_coord[i].second, gj);\n\n            int l, r;\n            std::tie(l, r) = A.local_coordinates(gi, gj);\n            EXPECT_EQ(l, locIdx);\n            EXPECT_EQ(r, rank);\n            ++i;\n        }\n    }\n\n    i = 0u;\n    for (auto rank = 0u; rank < P; ++rank) {\n        for (auto locIdx = 0u; locIdx < B_initial_size_target[rank]; ++locIdx) {\n            int gi, gj;\n            std::tie(gi, gj) = B.global_coordinates(locIdx, rank);\n            EXPECT_EQ(B_global_coord[i].first, gi);\n            EXPECT_EQ(B_global_coord[i].second, gj);\n\n            int l, r;\n            std::tie(l, r) = B.local_coordinates(gi, gj);\n            EXPECT_EQ(l, locIdx);\n            EXPECT_EQ(r, rank);\n            ++i;\n        }\n    }\n}\n"
  },
  {
    "path": "tests/multiply.cpp",
    "content": "#include \"../utils/cosma_utils.hpp\"\n#include <initializer_list>\n\n#include <gtest/gtest.h>\n#include <gtest_mpi/gtest_mpi.hpp>\n\nMPI_Comm subcommunicator(int new_P, MPI_Comm comm = MPI_COMM_WORLD) {\n    // original size\n    int P;\n    MPI_Comm_size(comm, &P);\n\n    // original group\n    MPI_Group group;\n    MPI_Comm_group(comm, &group);\n\n    // new comm and new group\n    MPI_Comm newcomm;\n    MPI_Group newcomm_group;\n\n    // ranks to exclude\n    std::vector<int> exclude_ranks;\n    for (int i = new_P; i < P; ++i) {\n        exclude_ranks.push_back(i);\n    }\n\n    // create reduced group\n    MPI_Group_excl(\n        group, exclude_ranks.size(), exclude_ranks.data(), &newcomm_group);\n    // create reduced communicator\n    MPI_Comm_create_group(comm, newcomm_group, 0, &newcomm);\n\n    MPI_Group_free(&group);\n    MPI_Group_free(&newcomm_group);\n\n    return newcomm;\n}\n\nstruct multiply_state {\n    int m = 10;\n    int n = 10;\n    int k = 10;\n    int P = 2;\n    std::vector<int> divs;\n    std::string dims = \"\";\n    std::string step_types = \"\";\n\n    multiply_state() = default;\n\n    multiply_state(int mm, int nn, int kk, int PP,\n                   std::vector<int> divisors,\n                   std::string dim,\n                   std::string steps)\n        : m(mm)\n        , n(nn)\n        , k(kk)\n        , P(PP)\n        , divs(divisors)\n        , dims(dim)\n        , step_types(steps)\n    {}\n\n    multiply_state(int mm, int nn, int kk, int PP)\n        : m(mm)\n        , n(nn)\n        , k(kk)\n        , P(PP)\n    {}\n\n    static int& get_test_counter() {\n        static int test_counter = 0;\n        return test_counter;\n    }\n\n    friend std::ostream &operator<<(std::ostream &os,\n                                    const multiply_state &obj) {\n        return os << \"(m, n, k) = (\" << obj.m << \", \" << obj.n << \", \" << obj.k\n                  << \")\" << std::endl\n                  << \"Number 
of ranks: \" << obj.P << std::endl\n                  << \"Strategy: \" << obj.dims << \", \" << obj.step_types << std::endl;\n    }\n};\n\nstruct MultiplyTest : testing::Test {\n    cosma::context<double> ctx;\n    std::unique_ptr<multiply_state> state;\n\n    MultiplyTest() {\n        ctx = cosma::make_context<double>();\n        state = std::make_unique<multiply_state>();\n    }\n};\n\nstruct MultiplyTestWithParams : MultiplyTest,\n                                testing::WithParamInterface<multiply_state> {\n    MultiplyTestWithParams() = default;\n};\n\nTEST_P(MultiplyTestWithParams, multiply) {\n    double epsilon = 1e-8;\n    auto state = GetParam();\n\n    MPI_Barrier(MPI_COMM_WORLD);\n\n    int rank;\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n\n    int m = state.m;\n    int n = state.n;\n    int k = state.k;\n    int P = state.P;\n    MPI_Comm comm = subcommunicator(P);\n\n    if (rank >= P) {\n        ++multiply_state::get_test_counter();\n        ++multiply_state::get_test_counter();\n    }\n\n    if (rank < P) {\n        // Strategy::min_dim_size = 32;\n        Strategy strategy(m, n, k, P, state.divs, state.dims, state.step_types);\n        if (rank == 0) {\n            std::cout << \"Strategy = \" << strategy << std::endl;\n        }\n\n        // first run without overlapping communication and computation\n        bool no_overlap = test_cosma<double>(strategy, ctx, comm, epsilon,\n                                             multiply_state::get_test_counter());\n        ++multiply_state::get_test_counter();\n\n        EXPECT_TRUE(no_overlap);\n\n        // then run with the overlap of communication and computation\n        strategy.enable_overlapping_comm_and_comp();\n        bool with_overlap = test_cosma<double>(strategy, ctx, comm, epsilon, multiply_state::get_test_counter());\n        ++multiply_state::get_test_counter();\n        EXPECT_TRUE(with_overlap);\n\n        MPI_Comm_free(&comm);\n    }\n}\n\nstd::vector<multiply_state> 
 generate_tests() {\n    return {\n        multiply_state(4, 4, 4, 1),\n        multiply_state(3, 4, 5, 1),\n\n        // strategy: pm2,sm2,pn2\n        multiply_state(8, 4, 2, 4,\n                    {2, 2, 2}, // divisors\n                    \"mmn\", // split dimensions\n                    \"psp\" // step types\n        ),\n        multiply_state(8, 4, 2, 4),\n\n        multiply_state(4, 4, 4, 2, \n                       {2},// divisors\n                       \"m\", // split dimensions\n                       \"p\" // step types\n        ),\n        multiply_state(4, 4, 4, 2),\n\n        multiply_state(4, 4, 4, 4, \n                       {2, 2, 2},// divisors\n                       \"mnn\", // split dimensions\n                       \"spp\" // step types\n        ),\n\n        // -strategy: sm2,pn2,pn2\n        multiply_state(4, 4, 4, 4, \n                       {2, 2, 2},// divisors\n                       \"mnn\", // split dimensions\n                       \"spp\" // step types\n        ),\n\n        multiply_state(30, 35, 40, 4),\n        // strategy: sm2,sm2,pn2\n        multiply_state(8, 8, 2, 2,\n            {2, 2, 2}, // divisors\n            \"mmn\", // split dimensions\n            \"ssp\" // step types\n        ),\n        multiply_state(8, 8, 2, 2),\n\n        // strategy: pm2,pm2\n        multiply_state(16, 4, 4, 4,\n                       {2, 2}, // divisors\n                       \"mm\", // split dimensions\n                       \"pp\" // step types\n        ),\n        multiply_state(16, 4, 4, 4),\n\n        // strategy: sk2,pm3\n        multiply_state(20, 20, 20, 3,\n                       {2, 3}, // divisors\n                       \"km\", // split dimensions\n                       \"sp\" // step types\n        ),\n        multiply_state(20, 20, 20, 3),\n\n        // strategy: pm2,pn2,pk2,pm2\n        multiply_state(16, 16, 16, 16,\n                       {2, 2, 2, 2}, // divisors\n                       \"mnkm\", // split dimensions\n  
                     \"pppp\" // step types\n        ),\n        multiply_state(16, 16, 16, 16),\n\n        // strategy: sm2,sn2,pk2,pm2\n        multiply_state(20, 30, 25, 4,\n                       {2, 2, 2, 2}, // divisors\n                       \"mnkm\", // split dimensions\n                       \"sspp\" // step types\n        ),\n        multiply_state(20, 30, 25, 4),\n\n        // strategy: sm2,pn2,sk2,pm5\n        multiply_state(100, 100, 100, 10,\n                       {2, 2, 2, 5}, // divisors\n                       \"mnkm\", // split dimensions\n                       \"spsp\" // step types\n        ),\n        multiply_state(100, 100, 100, 10),\n\n        // strategy: sm2,pn2,sk2,pm2\n        multiply_state(4, 4, 5, 4, \n                       {2, 2, 2, 2},  // divisors\n                       \"mnkm\", // split dimensions\n                       \"spsp\" // step types\n        ),\n\n        multiply_state(4, 4, 5, 4),\n\n        // strategy: pm2,pn2,pk3\n        multiply_state(10, 10, 10, 12,\n                       {2, 2, 3}, // divisors\n                       \"mnk\", // split dimensions\n                       \"ppp\" // step types\n        ),\n\n        // strategy: pm2,pn2,pk3\n        multiply_state(100, 100, 100, 12,\n                       {2, 2, 3}, // divisors\n                       \"mnk\", // split dimensions\n                       \"ppp\" // step types\n        ),\n\n        multiply_state(100, 100, 100, 12),\n\n        multiply_state(100, 100, 100, 4),\n\n        // strategy: pm7\n        multiply_state(100, 100, 100, 7,\n                       {7}, // divisors\n                       \"m\", // split dimensions\n                       \"p\" // step types\n        ),\n        multiply_state(100, 100, 100, 7),\n\n        // strategy: sm2,pn2,sk2,pm2,sn2,pk2\n        multiply_state(100, 100, 100, 8,\n                       {2, 2, 2, 2, 2, 2}, // divisors\n                       \"mnkmnk\", // split dimensions\n                       
\"spspsp\" // step types\n        ),\n        multiply_state(100, 100, 100, 8),\n\n        // strategy: pm2,pk2\n        multiply_state(31, 32, 33, 4,\n                       {2, 2}, // divisors\n                       \"mk\", // split dimensions\n                       \"pp\" // step types\n        ),\n        // strategy: pm2,pk2\n        multiply_state(100, 100, 100, 4,\n                       {2, 2}, // divisors\n                       \"mk\", // split dimensions\n                       \"pp\" // step types\n        ),\n        // strategy: pm2,pk2,pn2\n        multiply_state(100, 100, 100, 8,\n                       {2, 2}, // divisors\n                       \"mk\", // split dimensions\n                       \"pp\" // step types\n        ),\n\n        // strategy: sm2,sk2,sn2,pn2,pm2,pk2\n        multiply_state(100, 100, 100, 8,\n                       {2, 2, 2, 2, 2, 2}, // divisors\n                       \"mknnmk\", // split dimensions\n                       \"sssppp\" // step types\n        ),\n\n        // strategy: sk2,pm2,sn2,pk2,sm2,pn2\n        multiply_state(100, 100, 100, 8,\n                       {2, 2, 2, 2, 2, 2}, // divisors\n                       \"kmnkmn\", // split dimensions\n                       \"spspsp\" // step types\n        ),\n\n        // strategy: sk3,sm3,sn3,pk2,pn2,pm2\n        multiply_state(200, 200, 200, 8,\n                       {3, 3, 3, 2, 2, 2}, // divisors\n                       \"kmnknm\", // split dimensions\n                       \"sssppp\" // step types\n        ),\n        multiply_state(200, 200, 200, 8),\n\n        // strategy: sm3,pn2,sk3,pm2,sn3,pk2\n        multiply_state(200, 200, 200, 8,\n                       {3, 2, 3, 2, 3, 2}, // divisors\n                       \"mnkmnk\", // split dimensions\n                       \"spspsp\" // step types\n        ), \n\n        multiply_state(512, 32, 736, 8,\n                       {2, 2, 2},\n                       \"kmk\",\n                       \"ppp\"\n  
      )\n    };\n};\n\nINSTANTIATE_TEST_CASE_P(\n    Default,\n    MultiplyTestWithParams,\n    testing::ValuesIn(generate_tests()));\n"
  },
  {
    "path": "tests/multiply_using_layout.cpp",
    "content": "#include <cosma/local_multiply.hpp>\n#include <cosma/multiply.hpp>\n\n#include <gtest/gtest.h>\n#include <gtest_mpi/gtest_mpi.hpp>\n#include <mpi.h>\n\n#include <cmath>\n#include <limits>\n\n\nMPI_Comm subcommunicator(int new_P, MPI_Comm comm = MPI_COMM_WORLD) {\n    // original size\n    int P;\n    MPI_Comm_size(comm, &P);\n\n    // original group\n    MPI_Group group;\n    MPI_Comm_group(comm, &group);\n\n    // new comm and new group\n    MPI_Comm newcomm;\n    MPI_Group newcomm_group;\n\n    // ranks to exclude\n    std::vector<int> exclude_ranks;\n    for (int i = new_P; i < P; ++i) {\n        exclude_ranks.push_back(i);\n    }\n    // create reduced group\n    MPI_Group_excl(group, exclude_ranks.size(), exclude_ranks.data(), &newcomm_group);\n    // create reduced communicator\n    MPI_Comm_create_group(comm, newcomm_group, 0, &newcomm);\n\n    MPI_Group_free(&group);\n    MPI_Group_free(&newcomm_group);\n\n    return newcomm;\n}\n\ntemplate <typename scalar>\nvoid fill_matrix(cosma::CosmaMatrix<scalar> &M) {\n    for (int idx = 0; idx < M.matrix_size(); ++idx) {\n        M.matrix_pointer()[idx] = std::sin(idx);\n    }\n};\n\n// !!! 
[NOTE] The test depends on correct implementation of `multiply()`.\n//\nTEST(MultiplyUsingLayout, ) {\n    using scalar_t = double;\n    constexpr int nprocs = 4;\n    constexpr int m = 20;\n    constexpr int n = 20;\n    constexpr int k = 80;\n    constexpr scalar_t alpha = 1;\n    constexpr scalar_t beta = 1;\n    char transa = 'N';\n    char transb = 'N';\n\n    int rank;\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n    MPI_Comm comm = subcommunicator(nprocs, MPI_COMM_WORLD);\n\n    if (rank < nprocs) {\n        // cosma::Strategy::min_dim_size = 32;\n        cosma::Strategy strategy(m, n, k, nprocs);\n\n        // create a separate context\n        auto ctx = cosma::make_context<scalar_t>();\n\n        // these matrices have to be created within a context\n        // that is separated from the context that is used\n        // in multiply_using_layout, because the memory pool\n        // might resize within multiply_using_layout\n        // and the A_grid pointers might become outdated\n        cosma::CosmaMatrix<scalar_t> A(ctx, 'A', strategy, rank);\n        cosma::CosmaMatrix<scalar_t> B(ctx, 'B', strategy, rank);\n        cosma::CosmaMatrix<scalar_t> C(ctx, 'C', strategy, rank);\n\n        fill_matrix(A);\n        fill_matrix(B);\n        fill_matrix(C);\n\n        // important if beta > 0\n        cosma::CosmaMatrix<scalar_t> C_act(ctx, 'C', strategy, rank);\n        for (int idx = 0; idx < C_act.matrix_size(); ++idx) {\n            C_act.matrix_pointer()[idx] = C.matrix_pointer()[idx];\n        }\n\n        auto A_grid = A.get_grid_layout();\n        auto B_grid = B.get_grid_layout();\n        auto C_grid = C.get_grid_layout();\n\n        // This routine should not generally be used for COSMA matrices.\n        // If it is used, then we must ensure the context of matrices A_grid, B_grid and C_grid\n        // is not the default context (singleton) that multiply_using_layout is using\n        // because the pointers A_grid, B_grid and C_grid must be 
persistent \n        // throughout the whole execution of multiply_using_layout\n        cosma::multiply_using_layout(A_grid, B_grid, C_grid, alpha, beta, transa, transb, comm);\n\n        cosma::multiply(A, B, C_act, strategy, comm, alpha, beta);\n\n        // ----- Checks for data integrity\n\n        scalar_t *C_data = C.matrix_pointer();\n        scalar_t *C_act_data = C_act.matrix_pointer();\n\n        // Check if sizes match.\n        //\n        ASSERT_EQ(C.matrix_size(), C_act.matrix_size());\n\n        // Check if data elements match. Fail if an element doesn't match.\n        //\n        for (int i = 0; i < C_act.matrix_size(); ++i) {\n            ASSERT_DOUBLE_EQ(C_data[i], C_act_data[i]);\n        }\n    }\n}\n"
  },
  {
    "path": "tests/pdgemm.cpp",
    "content": "#include \"../utils/pxgemm_utils.hpp\"\n#include <cosma/strategy.hpp>\n\n#include <gtest/gtest.h>\n#include <gtest_mpi/gtest_mpi.hpp>\n#include <vector>\n#include <iostream>\n\nMPI_Comm subcommunicator(int new_P, MPI_Comm comm = MPI_COMM_WORLD) {\n    // original size\n    int P;\n    MPI_Comm_size(comm, &P);\n\n    // original group\n    MPI_Group group;\n    MPI_Comm_group(comm, &group);\n\n    // new comm and new group\n    MPI_Comm newcomm;\n    MPI_Group newcomm_group;\n\n    // ranks to exclude\n    std::vector<int> exclude_ranks;\n    for (int i = new_P; i < P; ++i) {\n        exclude_ranks.push_back(i);\n    }\n    // create reduced group\n    MPI_Group_excl(group, exclude_ranks.size(), exclude_ranks.data(), &newcomm_group);\n    // create reduced communicator\n    MPI_Comm_create_group(comm, newcomm_group, 0, &newcomm);\n\n    MPI_Group_free(&group);\n    MPI_Group_free(&newcomm_group);\n\n    return newcomm;\n}\n\nstruct PdgemmTest : testing::Test {\n    std::unique_ptr<cosma::pxgemm_params<double>> state;\n\n    PdgemmTest() {\n        state = std::make_unique<cosma::pxgemm_params<double>>();\n    }\n};\n\nstruct PdgemmTestWithParams : PdgemmTest,\n                              testing::WithParamInterface<cosma::pxgemm_params<double>> {\n    PdgemmTestWithParams() = default;\n};\n\nTEST_P(PdgemmTestWithParams, pdgemm) {\n    auto state = GetParam();\n\n    MPI_Barrier(MPI_COMM_WORLD);\n\n    int rank;\n    MPI_Comm_rank(MPI_COMM_WORLD, &rank);\n    MPI_Comm comm = subcommunicator(state.P, MPI_COMM_WORLD);\n\n    if (rank < state.P) {\n        if (rank == 0) {\n            std::cout << state << std::endl;\n        }\n\n        // cosma::Strategy::min_dim_size = 32;\n        bool correct = test_pxgemm<double>(state, comm);\n        EXPECT_TRUE(correct);\n\n        MPI_Comm_free(&comm);\n    }\n};\n\nINSTANTIATE_TEST_CASE_P(\n    Default,\n    PdgemmTestWithParams,\n    testing::Values(\n        // edge cases, which are allowed by the 
standard (m, n or k can be 0)\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            0, 5, 5,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.0,\n\n            // leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            5, 0, 5,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.0,\n\n            // leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            
// submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            0, 0, 0,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.0,\n\n            // leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            10, 0, 0,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.0,\n\n            // leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // scaling matrix C checking (k=0)\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            10, 10, 0,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.2,\n\n            // 
leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // scaling matrix C checking (k=0, irregular)\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            23, 34, // matrix A\n            34, 53, // matrix B\n            23, 53, // matrix C\n\n            // block sizes\n            2, 3, // matrix A\n            4, 5, // matrix B\n            5, 7, // matrix C\n\n            // submatrices ij\n            1, 2, // matrix A\n            2, 3, // matrix B\n            3, 4, // matrix C\n\n            // problem size\n            7, 11, 0,\n\n            // transpose flags\n            'N', 'N',\n\n            // scaling flags\n            1.0, 1.2,\n\n            // leading dims\n            53, 54, 55,\n\n            // proc grid\n            2, 3, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{4, 4, 4, 2, 2, 2, 2, 1, 'T', 'N', 1.0, 0.0},\n\n        // scaling matrix C checking (alpha = 0)\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            10, 10, // matrix A\n            10, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            2, 2, // matrix A\n            2, 2, // matrix B\n            2, 2, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            10, 10, 10,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            0.0, 1.2,\n\n            // leading dims\n            10, 10, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix 
A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // scaling matrix C checking (alpha = 0, irregular)\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            23, 34, // matrix A\n            34, 53, // matrix B\n            23, 53, // matrix C\n\n            // block sizes\n            2, 3, // matrix A\n            4, 5, // matrix B\n            5, 7, // matrix C\n\n            // submatrices ij\n            1, 2, // matrix A\n            2, 3, // matrix B\n            3, 4, // matrix C\n\n            // problem size\n            7, 11, 5,\n\n            // transpose flags\n            'N', 'N',\n\n            // scaling flags\n            0.0, 1.2,\n\n            // leading dims\n            53, 54, 55,\n\n            // proc grid\n            2, 3, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // alpha = 1.0, beta = 0.0\n        // single process\n        cosma::pxgemm_params<double>{10, 10, 10, 2, 2, 2, 1, 1, 'N', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{10, 11, 13, 2, 2, 2, 1, 1, 'N', 'N', 1.0, 0.0},\n\n        // default values of alpha and beta\n        cosma::pxgemm_params<double>{10, 10, 10, 2, 2, 2, 2, 2, 'N', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{5, 5, 5, 2, 2, 2, 2, 2, 'N', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{5, 5, 5, 2, 2, 2, 2, 2, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{8, 4, 8, 2, 2, 2, 3, 2, 'N', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{8, 4, 8, 2, 2, 2, 3, 2, 'T', 'N', 1.0, 0.0},\n\n        // different values of alpha and beta\n        cosma::pxgemm_params<double>{10, 12, 12, 2, 2, 2, 2, 2, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{10, 11, 12, 3, 2, 3, 3, 2, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{10, 11, 12, 3, 2, 3, 3, 2, 'T', 'N', 1.0, 1.0},\n        
cosma::pxgemm_params<double>{10, 11, 12, 3, 2, 3, 3, 2, 'T', 'N', 0.0, 0.0},\n        cosma::pxgemm_params<double>{10, 11, 12, 3, 2, 3, 3, 2, 'T', 'N', 0.0, 1.0},\n        cosma::pxgemm_params<double>{10, 11, 12, 3, 2, 3, 3, 2, 'T', 'N', 0.5, 0.5},\n\n        // alpha = 0.5, beta = 0.0\n        cosma::pxgemm_params<double>{10, 10, 10, 2, 2, 2, 2, 2, 'N', 'N', 0.5, 0.0},\n        cosma::pxgemm_params<double>{5, 5, 5, 2, 2, 2, 2, 2, 'N', 'N', 0.5, 0.0},\n        cosma::pxgemm_params<double>{5, 5, 5, 2, 2, 2, 2, 2, 'T', 'N', 0.5, 0.0},\n        cosma::pxgemm_params<double>{8, 4, 8, 2, 2, 2, 3, 2, 'N', 'N', 0.5, 0.0},\n        cosma::pxgemm_params<double>{8, 4, 8, 2, 2, 2, 3, 2, 'T', 'N', 0.5, 0.0},\n\n        // too many resources\n        cosma::pxgemm_params<double>{2, 2, 8, 2, 2, 4, 2, 1, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{4, 4, 24, 4, 4, 8, 2, 1, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{16, 16, 96, 16, 16, 32, 2, 1, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{16, 16, 96, 32, 32, 32, 2, 8, 'T', 'N', 0.5, 0.5},\n        cosma::pxgemm_params<double>{13, 13, 448, 13, 13, 13, 2, 7, 'T', 'N', 0.5, 0.5},\n        cosma::pxgemm_params<double>{13, 13, 448, 13, 13, 13, 2, 7, 'N', 'N', 1.0, 0.5},\n\n        cosma::pxgemm_params<double>{3, 3, 7, 3, 3, 3, 1, 1, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{5, 5, 11, 5, 5, 5, 2, 1, 'T', 'N', 1.0, 0.0},\n        cosma::pxgemm_params<double>{26, 13, 448, 13, 13, 13, 2, 1, 'T', 'N', 1.0, 0.5},\n        cosma::pxgemm_params<double>{26, 13, 448, 13, 13, 13, 2, 7, 'T', 'N', 1.0, 0.5},\n\n        // adapt strategy to scalapack grid when P = 1\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1280, 128, // matrix A\n            1280, 128, // matrix B\n            128, 128, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // 
submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            128, 128, 1280,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            1280, 1280, 128,\n\n            // proc grid\n            1, 1, 'C',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // detailed pdgemm call with ia, ja = 1\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1280, 1280, // matrix A\n            1280, 1280, // matrix B\n            1280, 1280, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            512, 32, 736,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            640, 640, 640,\n\n            // proc grid\n            2, 4, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // detailed pdgemm call\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1280, 1280, // matrix A\n            1280, 1280, // matrix B\n            1280, 1280, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 545, // matrix A\n            513, 545, // matrix B\n            1, 513, // matrix C\n\n            // problem size\n            512, 32, 736,\n\n            // transpose 
flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            640, 640, 640,\n\n            // proc grid\n            2, 4, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        // detailed pdgemm call\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1280, 1280, // matrix A\n            1280, 1280, // matrix B\n            1280, 1280, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 545, // matrix A\n            513, 545, // matrix B\n            1, 513, // matrix C\n\n            // problem size\n            512, 32, 736,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 1.0,\n\n            // leading dims\n            640, 640, 640,\n\n            // proc grid\n            2, 4, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1000, 10, // matrix A\n            1000, 10, // matrix B\n            10, 10, // matrix C\n\n            // block sizes\n            128, 128, // matrix A\n            128, 128, // matrix B\n            128, 128, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            10, 10, 1000,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            512, 512, 10,\n\n            // proc grid\n            2, 2, 'R',\n\n            // proc srcs\n            0, 0, // matrix 
A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            1824, 128, // matrix A\n            1824, 128, // matrix B\n            128, 128, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            128, 128, 1824,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            928, 928, 64,\n\n            // proc grid\n            2, 4, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            43417, 217, // matrix A\n            43417, 217, // matrix B\n            217, 217, // matrix C\n\n            // block sizes\n            169, 108, // matrix A\n            169, 108, // matrix B\n            108, 108, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            217, 217, 43417,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            54272, 54272, 1088,\n\n            // proc grid\n            8, 1, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            43176, 217, // matrix A\n            43176, 217, // matrix B\n            2176, 217, // matrix C\n\n            // 
block sizes\n            1696, 108, // matrix A\n            1696, 108, // matrix B\n            1088, 108, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            217, 217, 43176,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            54272, 54272, 1088,\n\n            // proc grid\n            8, 1, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n\t// CP2K runs from H2O-sos-mp2-lr\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            23, 23, // matrix A\n            23, 23, // matrix B\n            23, 23, // matrix C\n\n            // block sizes\n            12, 12, // matrix A\n            12, 12, // matrix B\n            12, 12, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            23, 23, 4,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            12, 12, 12,\n\n            // proc grid\n            2, 1, 'C',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            23, 23, // matrix A\n            23, 23, // matrix B\n            23, 23, // matrix C\n\n            // block sizes\n            12, 12, // matrix A\n            12, 12, // matrix B\n            12, 12, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem 
size\n            23, 23, 4,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, -1.0,\n\n            // leading dims\n            12, 12, 12,\n\n            // proc grid\n            2, 1, 'C',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            83, 83, // matrix A\n            83, 83, // matrix B\n            83, 83, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            83, 83, 83,\n\n            // transpose flags\n            'N', 'T',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            83, 83, 83,\n\n            // proc grid\n            1, 1, 'C',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            83, 83, // matrix A\n            83, 77, // matrix B\n            83, 77, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            83, 77, 83,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            83, 83, 83,\n\n            // proc grid\n            1, 1, 'C',\n\n            // proc srcs\n            0, 0, // matrix A\n            
0, 0, // matrix B\n            0, 0  // matrix C\n        },\n\n        cosma::pxgemm_params<double>{\n            // matrix dimensions\n            83, 83, // matrix A\n            83, 77, // matrix B\n            83, 77, // matrix C\n\n            // block sizes\n            32, 32, // matrix A\n            32, 32, // matrix B\n            32, 32, // matrix C\n\n            // submatrices ij\n            1, 1, // matrix A\n            1, 1, // matrix B\n            1, 1, // matrix C\n\n            // problem size\n            83, 77, 83,\n\n            // transpose flags\n            'T', 'N',\n\n            // scaling flags\n            1.0, 0.0,\n\n            // leading dims\n            83, 83, 83,\n\n            // proc grid\n            1, 1, 'R',\n\n            // proc srcs\n            0, 0, // matrix A\n            0, 0, // matrix B\n            0, 0  // matrix C\n        }\n    )\n);\n\n"
  },
  {
    "path": "tests/scalar_matmul.cpp",
    "content": "#include <gtest/gtest.h>\n#include <gtest_mpi/gtest_mpi.hpp>\n\n#include <string>\n#include \"../utils/cosma_utils.hpp\"\n\ntemplate <typename Scalar>\nvoid test_matmul() {\n    constexpr int m = 100;\n    constexpr int n = 100;\n    constexpr int k = 100;\n    constexpr int P = 8;\n    std::string step_types = \"spspsp\";\n    std::string dims = \"mnkmnk\";\n    std::vector<int> divs = {2, 2, 2, 2, 2, 2};\n\n    auto comm = MPI_COMM_WORLD;\n    int rank;\n    MPI_Comm_rank(comm, &rank);\n\n    Strategy strategy(m, n, k, P, divs, dims, step_types);\n\n    if (rank == 0) {\n        std::cout << \"Strategy = \" << strategy << std::endl;\n    }\n\n    auto ctx = cosma::make_context<Scalar>();\n\n    // first run without overlapping communication and computation\n    bool no_overlap = test_cosma<Scalar>(strategy, ctx, comm, 1e-2, 0);\n    ASSERT_TRUE(no_overlap);\n\n    // enable the ovelap of comm and comp\n    strategy.enable_overlapping_comm_and_comp();\n\n    // then run with the overlap of communication and computation\n    bool with_overlap = test_cosma<Scalar>(strategy, ctx, comm, 1e-2, 1);\n    ASSERT_TRUE(with_overlap);\n}\n\nTEST(Multiply, Float) { test_matmul<float>(); }\n\nTEST(Multiply, Double) { test_matmul<double>(); }\n\nTEST(Multiply, ComplexFloat) { test_matmul<std::complex<float>>(); }\n\nTEST(Multiply, ComplexDouble) { test_matmul<std::complex<double>>(); }\n"
  },
  {
    "path": "utils/cosma_utils.hpp",
    "content": "#include <algorithm>\n#include <cctype>\n#include <cosma/local_multiply.hpp>\n#include <cosma/mpi_mapper.hpp>\n#include <cosma/multiply.hpp>\n#include <cstdlib>\n#include <fstream>\n#include <iostream>\n#include <random>\n#include <string>\n#include <vector>\n\nusing namespace cosma;\n\ntemplate <typename T>\nvoid fill_matrix(T *ptr, size_t size) {\n    static std::random_device dev;                       // seed\n    static std::mt19937 rng(dev());                      // generator\n    static std::uniform_real_distribution<T> dist(10.0); // distribution\n\n    for (unsigned i = 0; i < size; ++i) {\n        ptr[i] = T{dist(rng)};\n    }\n}\n\ntemplate <typename T>\nvoid fill_matrix(std::complex<T> *ptr, size_t size) {\n    static std::random_device dev;                       // seed\n    static std::mt19937 rng(dev());                      // generator\n    static std::uniform_real_distribution<T> dist(10.0); // distribution\n\n    for (unsigned i = 0; i < size; ++i) {\n        ptr[i] = std::complex<T>{dist(rng), dist(rng)};\n    }\n}\n\ntemplate <typename Scalar>\nbool test_cosma(Strategy s,\n                context<Scalar> &ctx,\n                MPI_Comm comm = MPI_COMM_WORLD,\n                double epsilon = 1e-8,\n                int tag = 0) {\n    auto alpha = Scalar{1};\n    auto beta = Scalar{1};\n\n    int n_comm_rounds = 10;\n\n    int rank;\n    int size;\n    MPI_Comm_rank(comm, &rank);\n    MPI_Comm_size(comm, &size);\n\n    auto mpi_type = cosma::mpi_mapper<Scalar>::getType();\n\n    int m = s.m;\n    int n = s.n;\n    int k = s.k;\n    int P = s.P;\n\n    // Declare A,B and C COSMA matrices objects\n    CosmaMatrix<Scalar> A(ctx, 'A', s, rank);\n    CosmaMatrix<Scalar> B(ctx, 'B', s, rank);\n    CosmaMatrix<Scalar> C(ctx, 'C', s, rank);\n\n    // initial sizes\n    auto sizeA = A.matrix_size();\n    auto sizeB = B.matrix_size();\n    auto sizeC = C.matrix_size();\n\n    // fill the matrices with random data\n    
fill_matrix(A.matrix_pointer(), sizeA);\n    fill_matrix(B.matrix_pointer(), sizeB);\n    fill_matrix(C.matrix_pointer(), sizeC);\n\n#ifdef DEBUG\n    if (rank == 0) {\n        std::cout << \"Initial data in A and B:\" << std::endl;\n    }\n    for (std::size_t i = 0; i < s.P; i++) {\n        if (rank == i) {\n            printf(\"(%d) A: \", i);\n            for (std::size_t j = 0; j < sizeA; j++)\n                printf(\"%5.3d \", A.matrix_pointer()[j]);\n            printf(\"\\n\");\n\n            printf(\"(%d) B: \", i);\n            for (std::size_t j = 0; j < sizeB; j++)\n                printf(\"%5.3d \", B.matrix_pointer()[j]);\n            printf(\"\\n\");\n\n            printf(\"(%d) C: \", i);\n            for (size_t j = 0; j < sizeC; j++)\n                printf(\"%5.3d \", C.matrix_pointer()[j]);\n            printf(\"\\n\");\n        }\n        MPI_Barrier(comm);\n    }\n#endif // DEBUG\n\n    bool isOK;\n\n    // Then rank0 ask for other ranks data\n    std::vector<Scalar> As, Bs, Cs;\n    if (rank == 0) {\n        As = std::vector<Scalar>(m * k);\n        std::memcpy(\n            As.data(), A.matrix_pointer(), A.matrix_size() * sizeof(Scalar));\n        Bs = std::vector<Scalar>(k * n);\n        std::memcpy(\n            Bs.data(), B.matrix_pointer(), B.matrix_size() * sizeof(Scalar));\n        // copy C in case beta > 0\n        Cs = std::vector<Scalar>(m * n);\n        std::memcpy(\n            Cs.data(), C.matrix_pointer(), C.matrix_size() * sizeof(Scalar));\n\n        int offsetA = sizeA;\n        int offsetB = sizeB;\n        int offsetC = sizeC;\n\n        for (int i = 1; i < s.P; i++) {\n            int receive_size_A = A.matrix_size(i);\n            int receive_size_B = B.matrix_size(i);\n            int receive_size_C = C.matrix_size(i);\n\n            MPI_Status status;\n            int amount;\n\n            // Rank 0 receive data\n            int info = MPI_Recv(As.data() + offsetA,\n                                receive_size_A,\n    
                            mpi_type,\n                                i,\n                                tag * n_comm_rounds,\n                                comm,\n                                &status);\n            if (info != MPI_SUCCESS) {\n                // check if we received the right amount\n                MPI_Get_elements(&status, mpi_type, &amount);\n                if (amount != receive_size_A) {\n                    std::cout << \"Error: Did not receive all data for matrix A!\"\n                              << std::endl;\n                    std::cout << \"Received \" << amount << \", instead of \"\n                              << receive_size_A << std::endl;\n                    std::cout << \"Message source: \" << status.MPI_SOURCE\n                              << \", tag = \" << status.MPI_TAG << std::endl;\n                }\n            }\n\n            info = MPI_Recv(Bs.data() + offsetB,\n                            receive_size_B,\n                            mpi_type,\n                            i,\n                            tag * n_comm_rounds + 1,\n                            comm,\n                            &status);\n            if (info != MPI_SUCCESS) {\n                // check if we received the right amount\n                MPI_Get_elements(&status, mpi_type, &amount);\n                if (amount != receive_size_B) {\n                    std::cout << \"Error: Did not receive all data for matrix B!\"\n                              << std::endl;\n                    std::cout << \"Received \" << amount << \", instead of \"\n                              << receive_size_B << std::endl;\n                    std::cout << \"Message source: \" << status.MPI_SOURCE\n                              << \", tag = \" << status.MPI_TAG << std::endl;\n                }\n            }\n\n            info = MPI_Recv(Cs.data() + offsetC,\n                            receive_size_C,\n                            mpi_type,\n                 
           i,\n                            tag * n_comm_rounds + 2,\n                            comm,\n                            &status);\n            if (info != MPI_SUCCESS) {\n                // check if we received the right amount\n                MPI_Get_elements(&status, mpi_type, &amount);\n                if (amount != receive_size_C) {\n                    std::cout << \"Error: Did not receive all data for matrix C!\"\n                              << std::endl;\n                    std::cout << \"Received \" << amount << \", instead of \"\n                              << receive_size_C << std::endl;\n                    std::cout << \"Message source: \" << status.MPI_SOURCE\n                              << \", tag = \" << status.MPI_TAG << std::endl;\n                }\n            }\n\n            offsetA += receive_size_A;\n            offsetB += receive_size_B;\n            offsetC += receive_size_C;\n        }\n    }\n    // Rank i send data\n    if (rank > 0 && rank < s.P) {\n        int info = MPI_Ssend(\n            A.matrix_pointer(), sizeA, mpi_type, 0, tag * n_comm_rounds, comm);\n        if (info != MPI_SUCCESS) {\n            std::cout << \"MPI_Send was not successful on rank: \" << rank\n                      << \", for matrix A\" << std::endl;\n        }\n        info = MPI_Ssend(B.matrix_pointer(),\n                         sizeB,\n                         mpi_type,\n                         0,\n                         tag * n_comm_rounds + 1,\n                         comm);\n        if (info != MPI_SUCCESS) {\n            std::cout << \"MPI_Send was not successful on rank: \" << rank\n                      << \", for matrix B\" << std::endl;\n        }\n        info = MPI_Ssend(C.matrix_pointer(),\n                         sizeC,\n                         mpi_type,\n                         0,\n                         tag * n_comm_rounds + 2,\n                         comm);\n        if (info != MPI_SUCCESS) {\n            
std::cout << \"MPI_Send was not successful on rank: \" << rank\n                      << \", for matrix C\" << std::endl;\n        }\n    }\n\n    // MPI_Barrier(comm);\n\n    // Then rank 0 must reorder data locally\n    std::vector<Scalar> globA;\n    std::vector<Scalar> globB;\n    std::vector<Scalar> globCcheck;\n    if (rank == 0) {\n        globA.resize(m * k);\n        globB.resize(k * n);\n        globCcheck.resize(m * n);\n        int offsetA = 0;\n        int offsetB = 0;\n        int offsetC = 0;\n\n        for (int i = 0; i < s.P; i++) {\n            int local_size_A = A.matrix_size(i);\n            int local_size_B = B.matrix_size(i);\n            int local_size_C = C.matrix_size(i);\n\n            for (int j = 0; j < local_size_A; j++) {\n                int y, x;\n                std::tie(y, x) = A.global_coordinates(j, i);\n                if (y >= 0 && x >= 0) {\n                    globA.at(x * m + y) = As.at(offsetA + j);\n                }\n            }\n            for (int j = 0; j < local_size_B; j++) {\n                int y, x;\n                std::tie(y, x) = B.global_coordinates(j, i);\n                // std::cout << \"Mapped successfully!\\n\";\n                if (y >= 0 && x >= 0) {\n                    // globB.at(x*n+y)=Bs.at(i*sizeB+j);\n                    // std::cout << \"Retrieved Bs value successfully!\\n\";\n                    globB.at(x * k + y) = Bs.at(offsetB + j);\n                }\n            }\n            for (int j = 0; j < local_size_C; j++) {\n                int y, x;\n                std::tie(y, x) = C.global_coordinates(j, i);\n                // std::cout << \"Mapped successfully!\\n\";\n                if (y >= 0 && x >= 0) {\n                    // globB.at(x*n+y)=Bs.at(i*sizeB+j);\n                    // std::cout << \"Retrieved Bs value successfully!\\n\";\n                    globCcheck.at(x * m + y) = Cs.at(offsetC + j);\n                }\n            }\n\n            offsetA += local_size_A;\n       
     offsetB += local_size_B;\n            offsetC += local_size_C;\n        }\n        // Now compute the result\n        cosma::local_multiply_cpu(globA.data(),\n                                  globB.data(),\n                                  globCcheck.data(),\n                                  m,\n                                  n,\n                                  k,\n                                  alpha,\n                                  beta);\n#ifdef DEBUG\n        std::cout << \"Complete matrix A: \" << std::endl;\n        for (int i = 0; i < m; i++) {\n            for (int j = 0; j < k; j++) {\n                std::cout << globA[j * m + i] << \" \";\n            }\n            std::cout << \"\\n\";\n        }\n        std::cout << \"\\n\";\n\n        std::cout << \"Complete matrix B: \" << std::endl;\n        for (int i = 0; i < k; i++) {\n            for (int j = 0; j < n; j++) {\n                std::cout << globB[j * k + i] << \" \";\n            }\n            std::cout << \"\\n\";\n        }\n\n        std::cout << \"Complete matrix C: \" << std::endl;\n        for (int i = 0; i < m; i++) {\n            for (int j = 0; j < n; j++) {\n                std::cout << globCcheck[j * m + i] << \" \";\n            }\n            std::cout << \"\\n\";\n        }\n        std::cout << \"\\n\";\n#endif\n    }\n\n    multiply(A, B, C, s, comm, alpha, beta);\n\n    // Then rank0 asks for other ranks data\n    if (rank == 0) {\n        std::memcpy(\n            Cs.data(), C.matrix_pointer(), C.matrix_size() * sizeof(Scalar));\n\n        int offsetC = sizeC;\n\n        for (int i = 1; i < s.P; i++) {\n            int receive_size_C = C.matrix_size(i);\n            // Rank 0 receive data\n            MPI_Recv(Cs.data() + offsetC,\n                     receive_size_C,\n                     mpi_type,\n                     i,\n                     tag * n_comm_rounds + 4,\n                     comm,\n                     MPI_STATUSES_IGNORE);\n            
offsetC += receive_size_C;\n        }\n    }\n    // Rank i sends data\n    if (rank > 0 && rank < s.P) {\n        MPI_Ssend(C.matrix_pointer(),\n                  sizeC,\n                  mpi_type,\n                  0,\n                  tag * n_comm_rounds + 4,\n                  comm);\n    }\n\n    // Then rank 0 must reorder data locally\n    std::vector<Scalar> globC;\n    if (rank == 0) {\n        globC.resize(m * n);\n        int offsetC = 0;\n\n        for (int i = 0; i < s.P; i++) {\n            int local_size_C = C.matrix_size(i);\n\n            for (int j = 0; j < local_size_C; j++) {\n                int y, x;\n                std::tie(y, x) = C.global_coordinates(j, i);\n                if (y >= 0 && x >= 0) {\n                    globC.at(x * m + y) = Cs.at(offsetC + j);\n                }\n            }\n            offsetC += local_size_C;\n        }\n\n        // Now Check result\n        isOK = globCcheck.size() == globC.size();\n        for (int i = 0; i < globC.size(); ++i) {\n            // Use relative error for large values, absolute error for small\n            // values\n            double abs_error = std::abs(globC[i] - globCcheck[i]);\n            double scale =\n                std::max(std::abs(globC[i]), std::abs(globCcheck[i]));\n            double rel_error = (scale > 1e-10) ? abs_error / scale : abs_error;\n            // For float32, relative error tolerance should be ~1e-6\n            // For float64, relative error tolerance should be ~1e-12\n            double tolerance = (sizeof(Scalar) == 4) ? 
1e-5 : epsilon;\n            isOK = isOK && (rel_error < tolerance);\n        }\n\n        if (!isOK) {\n            std::cout << \"Result is NOT OK\" << std::endl;\n            int error_count = 0;\n            const int MAX_ERRORS_TO_PRINT = 20;\n            for (int i = 0; i < m * n; i++) {\n                if (globCcheck[i] != globC[i]) {\n                    error_count++;\n                    if (error_count <= MAX_ERRORS_TO_PRINT) {\n                        int x = i % m;\n                        int y = i / m;\n                        int locidx, rank;\n                        std::tie(locidx, rank) = C.local_coordinates(x, y);\n                        std::cout\n                            << \"global(\" << x << \", \" << y\n                            << \") = (loc = \" << locidx << \", rank = \" << rank\n                            << \") = \" << globC.at(i) << \" and should be \"\n                            << globCcheck.at(i) << \" (diff = \"\n                            << std::abs(globC.at(i) - globCcheck.at(i)) << \")\"\n                            << std::endl;\n                    }\n                }\n            }\n            std::cout << \"Total errors: \" << error_count << \" out of \"\n                      << (m * n) << \" elements (\"\n                      << (100.0 * error_count / (m * n)) << \"%)\" << std::endl;\n        } else {\n            std::cout << \"Result is OK\" << std::endl;\n        }\n    }\n#ifdef DEBUG\n    for (int i = 0; i < s.P; i++) {\n        if (rank == i) {\n            printf(\"(%d) A: \", i);\n            for (auto j = 0; j < sizeA; j++)\n                printf(\"%5.3d \", A.matrix_pointer()[j]);\n            printf(\"\\n\");\n\n            printf(\"(%d) B: \", i);\n            for (auto j = 0; j < sizeB; j++)\n                printf(\"%5.3d \", B.matrix_pointer()[j]);\n            printf(\"\\n\");\n\n            printf(\"(%d) C: \", i);\n            for (auto j = 0; j < sizeC; j++)\n                
printf(\"%5.3d \", C.matrix_pointer()[j]);\n            printf(\"\\n\");\n        }\n        MPI_Barrier(comm);\n    }\n#endif // DEBUG\n\n    // Synchronize all ranks before returning to prevent hangs\n    MPI_Barrier(comm);\n\n    return rank > 0 || isOK;\n}\n"
  },
  {
    "path": "utils/parse_strategy.hpp",
    "content": "#include <regex>\n#include <vector>\n\n#include <cosma/strategy.hpp>\n#include <cosma/environment_variables.hpp>\n\n/*\n * Parses the command line input to get the problem size\n * and divisions strategy if provided in the input.\n * Returns the strategy.\n */\n\n// finds the next int after start in the line\nint next_int(int start, const std::string& line) {\n    if (start < 0)\n        return -1;\n    std::regex int_expr(\"([0-9]+)\");\n    auto it = std::sregex_iterator(line.begin() + start, line.end(), int_expr);\n    int result = std::stoi(it->str());\n    return result;\n}\n\n// token is a triplet e.g. pm3 (denoting parallel (m / 3) step)\nvoid process_token(const std::string &step_triplet, \n                   std::string& step_type,\n                   std::string& split_dimension,\n                   std::vector<int>& divisors) {\n    if (step_triplet.length() < 3)\n        return;\n    step_type += step_triplet[0];\n    split_dimension += step_triplet[1];\n    divisors.push_back(next_int(2, step_triplet));\n}\n\ncosma::Strategy parse_strategy(const int m, const int n, \n                               const int k, const int P,\n                               const std::vector<std::string>& steps,\n                               const long long memory_limit,\n                               const bool overlap_comm_and_comp) {\n    if (steps.size() == 0) {\n        cosma::Strategy strategy(m, n, k, P, memory_limit);\n        if (overlap_comm_and_comp) {\n            strategy.enable_overlapping_comm_and_comp();\n        }\n        return strategy;\n    } else {\n        std::string step_type = \"\";\n        std::string split_dimension = \"\";\n        std::vector<int> divisors;\n\n        for (const std::string& step : steps) {\n            process_token(step, step_type, split_dimension, divisors);\n        }\n\n        cosma::Strategy strategy(m, n, k, P, divisors, split_dimension, step_type, memory_limit);\n        if (overlap_comm_and_comp) 
{\n            strategy.enable_overlapping_comm_and_comp();\n        }\n        return strategy;\n    }\n}\n\n"
  },
  {
    "path": "utils/pxgemm_utils.hpp",
    "content": "// from std\n#include <algorithm>\n#include <array>\n#include <cassert>\n#include <chrono>\n#include <complex>\n#include <iostream>\n#include <limits>\n#include <vector>\n\n// from cosma\n#include <cosma/blacs.hpp>\n#include <cosma/context.hpp>\n#include <cosma/cosma_pxgemm.hpp>\n#include <cosma/profiler.hpp>\n#include <cosma/pxgemm_params.hpp>\n#include <cosma/random_generator.hpp>\n#include <cosma/scalapack.hpp>\n\n// random number generator\n// we cast them to ints, so that we can more easily test them\n// but it's not necessary (they are anyway stored as double's)\ntemplate <typename T>\nvoid fill_randomly(std::vector<T> &in) {\n    std::generate(in.begin(), in.end(), []() {\n        return cosma::random_generator<T>::sample();\n    });\n}\n\ntemplate <typename T>\nstruct real_type {\n    using type = T;\n};\ntemplate <typename T>\nstruct real_type<std::complex<T>> {\n    using type = T;\n};\n\n/// Fill NaN into the local buffer entries that correspond to the submatrix\n/// C[ic:ic+m, jc:jc+n] (1-based, inclusive) of the globally distributed\n/// matrix described by `desc`. Only elements owned by the calling rank are\n/// written. 
Elements outside the submatrix (padding, unowned blocks, or\n/// out-of-submatrix owned elements) are left untouched.\n/// This is used to verify that pdgemm does not read C when beta==0,\n/// since reading uninitialized memory and multiplying by 0 would propagate NaN.\ntemplate <typename T>\nvoid fill_nan(std::vector<T> &in,\n              const std::array<int, 9> &desc,\n              int ic,\n              int jc,\n              int m,\n              int n) {\n    using R = typename real_type<T>::type;\n    int lld = desc[8];\n    int nb_rows = desc[4];\n    int nb_cols = desc[5];\n    int src_row = desc[6];\n    int src_col = desc[7];\n    int ctxt = desc[1];\n\n    int nprow, npcol, myrow, mycol;\n    cosma::blacs::Cblacs_gridinfo(ctxt, &nprow, &npcol, &myrow, &mycol);\n\n    T *buf = in.data();\n\n    // ic, jc are 1-based -> convert to 0-based\n    int ic0 = ic - 1;\n    int jc0 = jc - 1;\n\n    // iterate over global rows in [ic0, ic0+m)\n    for (int gr = ic0; gr < ic0 + m; ++gr) {\n        // which process row owns this global row?\n        int proc_row = (gr / nb_rows) % nprow;\n        if (proc_row != myrow)\n            continue;\n        // local row index\n        int local_row = (gr / (nb_rows * nprow)) * nb_rows + gr % nb_rows;\n\n        for (int gc = jc0; gc < jc0 + n; ++gc) {\n            // which process col owns this global col?\n            int proc_col = (gc / nb_cols) % npcol;\n            if (proc_col != mycol)\n                continue;\n            // local col index\n            int local_col = (gc / (nb_cols * npcol)) * nb_cols + gc % nb_cols;\n\n            buf[local_col * lld + local_row] =\n                std::numeric_limits<R>::quiet_NaN();\n        }\n    }\n}\n\n// **********************\n//   ScaLAPACK routines\n// **********************\nnamespace scalapack {\n#ifdef __cplusplus\nextern \"C\" {\n#endif\nvoid descinit_(int *desc,\n               const int *m,\n               const int *n,\n               const int *mb,\n        
       const int *nb,\n               const int *irsrc,\n               const int *icsrc,\n               const int *ictxt,\n               const int *lld,\n               int *info);\n\nvoid psgemm_(const char *trans_a,\n             const char *trans_b,\n             const int *m,\n             const int *n,\n             const int *k,\n             const float *alpha,\n             const float *a,\n             const int *ia,\n             const int *ja,\n             const int *desca,\n             const float *b,\n             const int *ib,\n             const int *jb,\n             const int *descb,\n             const float *beta,\n             float *c,\n             const int *ic,\n             const int *jc,\n             const int *descc);\n\nvoid pdgemm_(const char *trans_a,\n             const char *trans_b,\n             const int *m,\n             const int *n,\n             const int *k,\n             const double *alpha,\n             const double *a,\n             const int *ia,\n             const int *ja,\n             const int *desca,\n             const double *b,\n             const int *ib,\n             const int *jb,\n             const int *descb,\n             const double *beta,\n             double *c,\n             const int *ic,\n             const int *jc,\n             const int *descc);\n\nvoid pcgemm_(const char *trans_a,\n             const char *trans_b,\n             const int *m,\n             const int *n,\n             const int *k,\n             const float *alpha,\n             const float *a,\n             const int *ia,\n             const int *ja,\n             const int *desca,\n             const float *b,\n             const int *ib,\n             const int *jb,\n             const int *descb,\n             const float *beta,\n             float *c,\n             const int *ic,\n             const int *jc,\n             const int *descc);\n\nvoid pzgemm_(const char *trans_a,\n             const char *trans_b,\n    
         const int *m,\n             const int *n,\n             const int *k,\n             const double *alpha,\n             const double *a,\n             const int *ia,\n             const int *ja,\n             const int *desca,\n             const double *b,\n             const int *ib,\n             const int *jb,\n             const int *descb,\n             const double *beta,\n             double *c,\n             const int *ic,\n             const int *jc,\n             const int *descc);\n#ifdef __cplusplus\n}\n#endif\n} // namespace scalapack\n\n// *************************************\n//    templated scalapack pxgemm calls\n// *************************************\n// this is just for the convenience\ntemplate <typename T>\nstruct scalapack_pxgemm {\n    static inline void pxgemm(const char *trans_a,\n                              const char *trans_b,\n                              const int *m,\n                              const int *n,\n                              const int *k,\n                              const T *alpha,\n                              const T *a,\n                              const int *ia,\n                              const int *ja,\n                              const int *desca,\n                              const T *b,\n                              const int *ib,\n                              const int *jb,\n                              const int *descb,\n                              const T *beta,\n                              T *c,\n                              const int *ic,\n                              const int *jc,\n                              const int *descc);\n};\n\ntemplate <>\ninline void scalapack_pxgemm<float>::pxgemm(const char *trans_a,\n                                            const char *trans_b,\n                                            const int *m,\n                                            const int *n,\n                                            const int *k,\n                
                            const float *alpha,\n                                            const float *a,\n                                            const int *ia,\n                                            const int *ja,\n                                            const int *desca,\n                                            const float *b,\n                                            const int *ib,\n                                            const int *jb,\n                                            const int *descb,\n                                            const float *beta,\n                                            float *c,\n                                            const int *ic,\n                                            const int *jc,\n                                            const int *descc) {\n    scalapack::psgemm_(trans_a,\n                       trans_b,\n                       m,\n                       n,\n                       k,\n                       alpha,\n                       a,\n                       ia,\n                       ja,\n                       desca,\n                       b,\n                       ib,\n                       jb,\n                       descb,\n                       beta,\n                       c,\n                       ic,\n                       jc,\n                       descc);\n}\n\ntemplate <>\ninline void scalapack_pxgemm<double>::pxgemm(const char *trans_a,\n                                             const char *trans_b,\n                                             const int *m,\n                                             const int *n,\n                                             const int *k,\n                                             const double *alpha,\n                                             const double *a,\n                                             const int *ia,\n                                             const int *ja,\n                        
                     const int *desca,\n                                             const double *b,\n                                             const int *ib,\n                                             const int *jb,\n                                             const int *descb,\n                                             const double *beta,\n                                             double *c,\n                                             const int *ic,\n                                             const int *jc,\n                                             const int *descc) {\n    scalapack::pdgemm_(trans_a,\n                       trans_b,\n                       m,\n                       n,\n                       k,\n                       alpha,\n                       a,\n                       ia,\n                       ja,\n                       desca,\n                       b,\n                       ib,\n                       jb,\n                       descb,\n                       beta,\n                       c,\n                       ic,\n                       jc,\n                       descc);\n}\n\ntemplate <>\ninline void\nscalapack_pxgemm<std::complex<float>>::pxgemm(const char *trans_a,\n                                              const char *trans_b,\n                                              const int *m,\n                                              const int *n,\n                                              const int *k,\n                                              const std::complex<float> *alpha,\n                                              const std::complex<float> *a,\n                                              const int *ia,\n                                              const int *ja,\n                                              const int *desca,\n                                              const std::complex<float> *b,\n                                              const int *ib,\n             
                                 const int *jb,\n                                              const int *descb,\n                                              const std::complex<float> *beta,\n                                              std::complex<float> *c,\n                                              const int *ic,\n                                              const int *jc,\n                                              const int *descc) {\n    scalapack::pcgemm_(trans_a,\n                       trans_b,\n                       m,\n                       n,\n                       k,\n                       reinterpret_cast<const float *>(alpha),\n                       reinterpret_cast<const float *>(a),\n                       ia,\n                       ja,\n                       desca,\n                       reinterpret_cast<const float *>(b),\n                       ib,\n                       jb,\n                       descb,\n                       reinterpret_cast<const float *>(beta),\n                       reinterpret_cast<float *>(c),\n                       ic,\n                       jc,\n                       descc);\n}\n\ntemplate <>\ninline void scalapack_pxgemm<std::complex<double>>::pxgemm(\n    const char *trans_a,\n    const char *trans_b,\n    const int *m,\n    const int *n,\n    const int *k,\n    const std::complex<double> *alpha,\n    const std::complex<double> *a,\n    const int *ia,\n    const int *ja,\n    const int *desca,\n    const std::complex<double> *b,\n    const int *ib,\n    const int *jb,\n    const int *descb,\n    const std::complex<double> *beta,\n    std::complex<double> *c,\n    const int *ic,\n    const int *jc,\n    const int *descc) {\n    scalapack::pzgemm_(trans_a,\n                       trans_b,\n                       m,\n                       n,\n                       k,\n                       reinterpret_cast<const double *>(alpha),\n                       reinterpret_cast<const double *>(a),\n 
                      ia,\n                       ja,\n                       desca,\n                       reinterpret_cast<const double *>(b),\n                       ib,\n                       jb,\n                       descb,\n                       reinterpret_cast<const double *>(beta),\n                       reinterpret_cast<double *>(c),\n                       ic,\n                       jc,\n                       descc);\n}\n\n// // compares two vectors up to eps precision, returns true if they are equal\n// template <typename T>\n// bool validate_results(std::vector<T>& v1, std::vector<T>& v2, double\n// epsilon=1e-6) {\n//     double lower_threshold = 1e-3;\n\n//     if (v1.size() != v2.size())\n//         return false;\n//     if (v1.size() == 0)\n//         return true;\n//     bool correct = true;\n//     for (size_t i = 0; i < v1.size(); ++i) {\n//         if (std::abs(v1[i] - v2[i]) / std::max(std::max(lower_threshold,\n//         (double) std::abs(v1[i])), (double) std::abs(v2[i])) > epsilon) {\n//             std::cout << \"epsilon = \" << epsilon << \", v1 = \" << v1[i] << \",\n//             which is != \" << v2[i] << std::endl; correct = false; return\n//             correct;\n//         }\n//     }\n//     return correct;\n// }\ntemplate <typename T>\nstruct is_complex : std::false_type {};\ntemplate <typename T>\nstruct is_complex<std::complex<T>> : std::true_type {};\n\ntemplate <typename T>\nbool validate_results(std::vector<T> &v1,\n                      std::vector<T> &v2,\n                      double epsilon = 1e-6) {\n    double lower_threshold = 1e-3;\n\n    if (v1.size() != v2.size())\n        return false;\n    if (v1.size() == 0)\n        return true;\n\n    for (size_t i = 0; i < v1.size(); ++i) {\n        if constexpr (is_complex<T>::value) {\n            if (std::isnan(v1[i].real()) || std::isnan(v1[i].imag()) ||\n                std::isnan(v2[i].real()) || std::isnan(v2[i].imag())) {\n                return false;\n       
     }\n        } else {\n            if (std::isnan(v1[i]) || std::isnan(v2[i])) {\n                return false;\n            }\n        }\n\n        double abs1 = std::abs(v1[i]); // works for both: real |x|, complex |z|\n        double abs2 = std::abs(v2[i]);\n        double diff = std::abs(v1[i] - v2[i]);\n        double scale = std::max({lower_threshold, abs1, abs2});\n\n        if (diff / scale > epsilon) {\n            std::cout << \"epsilon = \" << epsilon << \", v1 = \" << v1[i]\n                      << \", which is != \" << v2[i] << std::endl;\n            return false;\n        }\n    }\n    return true;\n}\n\n// runs cosma or scalapack pdgemm wrapper for n_rep times and returns\n// a vector of timings (in milliseconds) of size n_rep\ntemplate <typename T>\nbool benchmark_pxgemm(cosma::pxgemm_params<T> &params,\n                      MPI_Comm comm,\n                      int n_rep,\n                      const std::string &algorithm,\n                      std::vector<long> &cosma_times,\n                      std::vector<long> &scalapack_times,\n                      bool test_correctness = false,\n                      bool exit_blacs = false) {\n    assert(algorithm == \"both\" || algorithm == \"cosma\" ||\n           algorithm == \"scalapack\");\n    if (algorithm == \"both\" || algorithm == \"cosma\") {\n        cosma_times.resize(n_rep);\n    }\n    if (algorithm == \"both\" || algorithm == \"scalapack\") {\n        scalapack_times.resize(n_rep);\n    }\n\n    // create the context here, so that\n    // it doesn't have to be created later\n    // (this is not necessary)\n    int rank;\n    MPI_Comm_rank(comm, &rank);\n    auto cosma_ctx = cosma::get_context_instance<T>();\n\n#ifdef DEBUG\n    if (rank == 0) {\n        cosma_ctx->turn_on_output();\n    }\n#endif\n\n    // ************************************\n    // *    scalapack processor grid      *\n    // ************************************\n    int ctxt = 
cosma::blacs::Csys2blacs_handle(comm);\n    cosma::blacs::Cblacs_gridinit(\n        &ctxt, &params.order, params.p_rows, params.p_cols);\n\n    // ************************************\n    // *   scalapack array descriptors    *\n    // ************************************\n    int info;\n    // matrix A\n    std::array<int, 9> desca;\n    scalapack::descinit_(&desca[0],\n                         &params.ma,\n                         &params.na,\n                         &params.bma,\n                         &params.bna,\n                         &params.src_ma,\n                         &params.src_na,\n                         &ctxt,\n                         &params.lld_a,\n                         &info);\n    if (rank == 0 && info != 0) {\n        std::cout << \"ERROR: descinit, argument: \" << -info\n                  << \" has an illegal value!\" << std::endl;\n    }\n\n    // matrix B\n    std::array<int, 9> descb;\n    scalapack::descinit_(&descb[0],\n                         &params.mb,\n                         &params.nb,\n                         &params.bmb,\n                         &params.bnb,\n                         &params.src_mb,\n                         &params.src_nb,\n                         &ctxt,\n                         &params.lld_b,\n                         &info);\n\n    if (rank == 0 && info != 0) {\n        std::cout << \"ERROR: descinit, argument: \" << -info\n                  << \" has an illegal value!\" << std::endl;\n    }\n\n    // matrix C\n    std::array<int, 9> descc;\n    scalapack::descinit_(&descc[0],\n                         &params.mc,\n                         &params.nc,\n                         &params.bmc,\n                         &params.bnc,\n                         &params.src_mc,\n                         &params.src_nc,\n                         &ctxt,\n                         &params.lld_c,\n                         &info);\n    if (rank == 0 && info != 0) {\n        std::cout << \"ERROR: descinit, 
argument: \" << -info\n                  << \" has an illegal value!\" << std::endl;\n    }\n\n    // ************************************\n    // *   scalapack memory allocations   *\n    // ************************************\n    int size_a = cosma::scalapack::local_buffer_size(&desca[0]);\n    int size_b = cosma::scalapack::local_buffer_size(&descb[0]);\n    int size_c = cosma::scalapack::local_buffer_size(&descc[0]);\n\n    std::vector<T> a;\n    std::vector<T> b;\n    std::vector<T> c_cosma;\n    std::vector<T> c_scalapack;\n\n    try {\n        a = std::vector<T>(size_a);\n        b = std::vector<T>(size_b);\n        if (algorithm == \"both\" || algorithm == \"cosma\") {\n            c_cosma = std::vector<T>(size_c);\n        }\n        if (algorithm == \"both\" || algorithm == \"scalapack\") {\n            c_scalapack = std::vector<T>(size_c);\n        }\n    } catch (const std::bad_alloc &e) {\n        std::cout\n            << \"COSMA (pxgemm_utils): not enough space to store the initial \"\n               \"local matrices. The problem size is too large. Either decrease \"\n               \"the problem size or run it on more nodes/ranks.\"\n            << std::endl;\n        cosma::blacs::Cblacs_gridexit(ctxt);\n        int dont_finalize_mpi = 1;\n        cosma::blacs::Cblacs_exit(dont_finalize_mpi);\n        throw;\n    } catch (const std::length_error &e) {\n        std::cout << \"COSMA (pxgemm_utils): the initial local size of matrices \"\n                     \">= vector::max_size(). 
Try using std::array or similar \"\n                     \"in cosma/utils/pxgemm_utils.cpp instead of vectors to \"\n                     \"store the initial matrices.\"\n                  << std::endl;\n        cosma::blacs::Cblacs_gridexit(ctxt);\n        int dont_finalize_mpi = 1;\n        cosma::blacs::Cblacs_exit(dont_finalize_mpi);\n        throw;\n    } catch (const std::exception &e) {\n        std::cout << \"COSMA (pxgemm_utils): unknown exception, potentially a \"\n                     \"bug. Please inform us of the test-case.\"\n                  << std::endl;\n        cosma::blacs::Cblacs_gridexit(ctxt);\n        int dont_finalize_mpi = 1;\n        cosma::blacs::Cblacs_exit(dont_finalize_mpi);\n        throw;\n    }\n\n    for (int i = 0; i < n_rep; ++i) {\n        // refill the matrices with random data to avoid\n        // reusing the cache in subsequent iterations\n        fill_randomly(a);\n        fill_randomly(b);\n        if (algorithm == \"both\") {\n            // if beta == 0, C should not be read; fill with NaN to catch any\n            // accidental reads\n            if (params.beta == T{0}) {\n                fill_nan(\n                    c_cosma, descc, params.ic, params.jc, params.m, params.n);\n                fill_nan(c_scalapack,\n                         descc,\n                         params.ic,\n                         params.jc,\n                         params.m,\n                         params.n);\n            } else {\n                fill_randomly(c_cosma);\n                // in case beta > 0, this is important in order to get the same\n                // results\n                c_scalapack = c_cosma;\n            }\n        } else if (algorithm == \"cosma\") {\n            if (params.beta == T{0})\n                fill_nan(\n                    c_cosma, descc, params.ic, params.jc, params.m, params.n);\n            else\n                fill_randomly(c_cosma);\n        } else {\n            if (params.beta == T{0})\n    
            fill_nan(c_scalapack,\n                         descc,\n                         params.ic,\n                         params.jc,\n                         params.m,\n                         params.n);\n            else\n                fill_randomly(c_scalapack);\n        }\n\n        if (algorithm == \"both\" || algorithm == \"cosma\") {\n            // ***********************************\n            //       run COSMA PDGEMM\n            // ***********************************\n            // running COSMA wrapper\n            long time = 0;\n            MPI_Barrier(comm);\n            auto start = std::chrono::steady_clock::now();\n            cosma::pxgemm<T>(params.trans_a,\n                             params.trans_b,\n                             params.m,\n                             params.n,\n                             params.k,\n                             params.alpha,\n                             a.data(),\n                             params.ia,\n                             params.ja,\n                             &desca[0],\n                             b.data(),\n                             params.ib,\n                             params.jb,\n                             &descb[0],\n                             params.beta,\n                             c_cosma.data(),\n                             params.ic,\n                             params.jc,\n                             &descc[0]);\n            MPI_Barrier(comm);\n            auto end = std::chrono::steady_clock::now();\n            time = std::chrono::duration_cast<std::chrono::milliseconds>(end -\n                                                                         start)\n                       .count();\n            cosma_times[i] = time;\n        }\n\n        if (algorithm == \"both\" || algorithm == \"scalapack\") {\n            // ***********************************\n            //       run ScaLAPACK PDGEMM\n            // 
***********************************\n            // running ScaLAPACK\n            long time = 0;\n            MPI_Barrier(comm);\n            auto start = std::chrono::steady_clock::now();\n            scalapack_pxgemm<T>::pxgemm(&params.trans_a,\n                                        &params.trans_b,\n                                        &params.m,\n                                        &params.n,\n                                        &params.k,\n                                        &params.alpha,\n                                        a.data(),\n                                        &params.ia,\n                                        &params.ja,\n                                        &desca[0],\n                                        b.data(),\n                                        &params.ib,\n                                        &params.jb,\n                                        &descb[0],\n                                        &params.beta,\n                                        c_scalapack.data(),\n                                        &params.ic,\n                                        &params.jc,\n                                        &descc[0]);\n            MPI_Barrier(comm);\n            auto end = std::chrono::steady_clock::now();\n            time = std::chrono::duration_cast<std::chrono::milliseconds>(end -\n                                                                         start)\n                       .count();\n            scalapack_times[i] = time;\n        }\n    }\n\n    if (algorithm == \"both\" || algorithm == \"cosma\") {\n        std::sort(cosma_times.rbegin(), cosma_times.rend());\n    }\n    if (algorithm == \"both\" || algorithm == \"scalapack\") {\n        std::sort(scalapack_times.rbegin(), scalapack_times.rend());\n    }\n\n    // if algorithm != both then we don't check the correctness,\n    // also if test_correctness flag is set to false\n    bool correct = algorithm != \"both\" || 
!test_correctness ||\n                   validate_results(c_cosma, c_scalapack);\n\n    // exit blacs context\n    cosma::blacs::Cblacs_gridexit(ctxt);\n    if (exit_blacs) {\n        int dont_finalize_mpi = 1;\n        cosma::blacs::Cblacs_exit(dont_finalize_mpi);\n    }\n\n    return correct;\n}\n\ntemplate <typename T>\nbool test_pxgemm(cosma::pxgemm_params<T> &params,\n                 MPI_Comm comm,\n                 bool test_correctness = true,\n                 bool exit_blacs = false) {\n    std::vector<long> t1;\n    std::vector<long> t2;\n    int n_rep = 1;\n    return benchmark_pxgemm(\n        params, comm, n_rep, \"both\", t1, t2, true, exit_blacs);\n}\n"
  }
]