[
  {
    "path": ".github/workflows/linux.yml",
    "content": "name: Linux\non:\n  workflow_dispatch:\n  pull_request:\n  push:\n    branches: [master]\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}\n  cancel-in-progress: true\ndefaults:\n  run:\n    shell: bash -e -l {0}\njobs:\n  build:\n    runs-on: ubuntu-24.04\n    name: ${{ matrix.sys.compiler }} ${{ matrix.sys.version }}\n    strategy:\n      fail-fast: false\n      matrix:\n        sys:\n        - {compiler: gcc, version: '11'}\n        - {compiler: gcc, version: '12'}\n        - {compiler: gcc, version: '13'}\n        - {compiler: gcc, version: '14'}\n        - {compiler: clang, version: '17'}\n        - {compiler: clang, version: '18'}\n        - {compiler: clang, version: '19'}\n        - {compiler: clang, version: '20'}\n\n    steps:\n    - name: Install GCC\n      if: matrix.sys.compiler == 'gcc'\n      uses: egor-tensin/setup-gcc@v1\n      with:\n        version: ${{matrix.sys.version}}\n        platform: x64\n\n    - name: Install LLVM and Clang\n      if: matrix.sys.compiler == 'clang'\n      run: |\n        wget https://apt.llvm.org/llvm.sh\n        chmod +x llvm.sh\n        sudo ./llvm.sh ${{matrix.sys.version}}\n        sudo apt-get install -y clang-tools-${{matrix.sys.version}}\n        sudo update-alternatives --install /usr/bin/clang++                 clang++                     /usr/bin/clang++-${{matrix.sys.version}} 200\n        sudo update-alternatives --install /usr/bin/clang                       clang                         /usr/bin/clang-${{matrix.sys.version}} 200\n        sudo update-alternatives --install /usr/bin/clang-scan-deps   clang-scan-deps     /usr/bin/clang-scan-deps-${{matrix.sys.version}} 200\n        sudo update-alternatives --set clang /usr/bin/clang-${{matrix.sys.version}}\n        sudo update-alternatives --set clang++ /usr/bin/clang++-${{matrix.sys.version}}\n        sudo update-alternatives --set clang-scan-deps /usr/bin/clang-scan-deps-${{matrix.sys.version}}\n\n    - name: 
Checkout code\n      uses: actions/checkout@v3\n\n    - name: Set conda environment\n      uses: mamba-org/setup-micromamba@v1\n      with:\n        environment-file: environment-dev.yml\n        cache-environment: true\n\n    - name: Configure using CMake\n      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON $GITHUB_WORKSPACE\n\n    - name: Install\n      working-directory: build\n      run: cmake --install .\n\n    - name: Build\n      working-directory: build\n      run: cmake --build . --target test_xtensor_python --parallel 8\n\n    - name: Run tests (C++)\n      working-directory: build/test\n      run: ./test_xtensor_python\n\n    - name: Run tests (Python)\n      run: pytest -s\n\n    - name: Example - readme 1\n      working-directory: docs/source/examples/readme_example_1\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n\n    - name: Example - copy 'cast'\n      working-directory: docs/source/examples/copy_cast\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n\n    - name: Example - SFINAE\n      working-directory: docs/source/examples/sfinae\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n"
  },
  {
    "path": ".github/workflows/osx.yml",
    "content": "name: OSX\non:\n  workflow_dispatch:\n  pull_request:\n  push:\n    branches: [master]\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}\n  cancel-in-progress: true\ndefaults:\n  run:\n    shell: bash -e -l {0}\njobs:\n  build:\n    runs-on: macos-${{ matrix.os }}\n    name: macos-${{ matrix.os }}\n    strategy:\n      fail-fast: false\n      matrix:\n        os:\n        - 14\n        - 15\n\n    steps:\n\n    - name: Checkout code\n      uses: actions/checkout@v3\n\n    - name: Set conda environment\n      uses: mamba-org/setup-micromamba@v1\n      with:\n        environment-file: environment-dev.yml\n        cache-environment: true\n\n    - name: Configure using CMake\n      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON $(Build.SourcesDirectory)\n\n    - name: Install\n      working-directory: build\n      run: cmake --install .\n\n    - name: Build\n      working-directory: build\n      run: cmake --build . 
--target test_xtensor_python --parallel 8\n\n    - name: Run tests (C++)\n      working-directory: build/test\n      run: ./test_xtensor_python\n\n    - name: Run tests (Python)\n      run: pytest -s\n\n    - name: Example - readme 1\n      working-directory: docs/source/examples/readme_example_1\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n\n    - name: Example - copy 'cast'\n      working-directory: docs/source/examples/copy_cast\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n\n    - name: Example - SFINAE\n      working-directory: docs/source/examples/sfinae\n      run: |\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n        cd build\n        cmake --build .\n        cp ../example.py .\n        python example.py\n"
  },
  {
    "path": ".github/workflows/windows.yml",
    "content": "name: Windows\non:\n  workflow_dispatch:\n  pull_request:\n  push:\n    branches: [master]\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}\n  cancel-in-progress: true\ndefaults:\n  run:\n    shell: bash -e -l {0}\njobs:\n  build:\n    runs-on: [windows-latest]\n    name: Windows\n    \n    steps:\n    \n    - name: Setup MSVC\n      uses: ilammy/msvc-dev-cmd@v1\n\n    - name: Checkout code\n      uses: actions/checkout@v3\n\n    - name: Set conda environment\n      uses: mamba-org/setup-micromamba@v1\n      with:\n        environment-file: environment-dev.yml\n        cache-environment: true\n\n    - name: Configure using CMake\n      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON $(Build.SourcesDirectory)\n\n    - name: Install\n      working-directory: build\n      run: cmake --install .\n\n    - name: Build\n      working-directory: build\n      run: cmake --build . --target test_xtensor_python --parallel 8\n\n    - name: Run tests (C++)\n      working-directory: build/test\n      run: ./test_xtensor_python\n\n    - name: Run tests (Python)\n      run: pytest -s\n"
  },
  {
    "path": ".gitignore",
    "content": "# Prerequisites\n*.d\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.obj\n\n# Precompiled Headers\n*.gch\n*.pch\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n*.dll\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n*.lib\n\n# Executables\n*.exe\n*.out\n*.app\n\n# Vim tmp files\n*.swp\n\n# Build directory\nbuild/\n\n# Test build artefacts\ntest/test_xtensor_python\ntest/CMakeCache.txt\ntest/Makefile\ntest/CMakeFiles/\ntest/cmake_install.cmake\n.pytest_cache/\n\n# Documentation build artefacts\ndocs/CMakeCache.txt\ndocs/xml/\ndocs/build/\n\n# Jupyter artefacts\n.ipynb_checkpoints/\n\n# Python\n*.py[cod]\n__pycache__\nbuild\n*.egg-info\n\n# py.test\n.cache/\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\ncmake_minimum_required(VERSION 3.29)\nproject(xtensor-python)\n\nset(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})\nset(XTENSOR_PYTHON_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)\n\n# Versioning\n# ==========\n\nset(XTENSOR_PYTHON_CONFIG_FILE\n    \"${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp\")\nfile(STRINGS ${XTENSOR_PYTHON_CONFIG_FILE} xtensor_python_version_defines\n    REGEX \"#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH)\")\nforeach(ver ${xtensor_python_version_defines})\n    if(ver MATCHES \"#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$\")\n        set(XTENSOR_PYTHON_VERSION_${CMAKE_MATCH_1} \"${CMAKE_MATCH_2}\" CACHE INTERNAL \"\")\n    endif()\nendforeach()\nset(${PROJECT_NAME}_VERSION\n    ${XTENSOR_PYTHON_VERSION_MAJOR}.${XTENSOR_PYTHON_VERSION_MINOR}.${XTENSOR_PYTHON_VERSION_PATCH})\nmessage(STATUS \"xtensor-python v${${PROJECT_NAME}_VERSION}\")\n\n# Dependencies\n# ============\n\nset(xtensor_REQUIRED_VERSION 0.27.0)\nif(TARGET xtensor)\n    set(xtensor_VERSION ${XTENSOR_VERSION_MAJOR}.${XTENSOR_VERSION_MINOR}.${XTENSOR_VERSION_PATCH})\n    # Note: This is not SEMVER compatible comparison\n    if( NOT ${xtensor_VERSION} VERSION_GREATER_EQUAL ${xtensor_REQUIRED_VERSION})\n        message(ERROR \"Mismatch xtensor versions. 
Found '${xtensor_VERSION}' but requires: '${xtensor_REQUIRED_VERSION}'\")\n    else()\n        message(STATUS \"Found xtensor v${xtensor_VERSION}\")\n    endif()\nelse()\n    find_package(xtensor ${xtensor_REQUIRED_VERSION} REQUIRED)\n    message(STATUS \"Found xtensor: ${xtensor_INCLUDE_DIRS}/xtensor\")\nendif()\n\nfind_package(Python COMPONENTS Interpreter REQUIRED)\n\nset(pybind11_REQUIRED_VERSION 3.0.0)\nif (NOT TARGET pybind11::headers)\n    # Defaults to ON for cmake >= 3.18\n    # https://github.com/pybind/pybind11/blob/35ff42b56e9d34d9a944266eb25f2c899dbdfed7/CMakeLists.txt#L96\n    set(PYBIND11_FINDPYTHON OFF)\n    find_package(pybind11 ${pybind11_REQUIRED_VERSION} REQUIRED)\n    message(STATUS \"Found pybind11: ${pybind11_INCLUDE_DIRS}/pybind11\")\nelse ()\n    # pybind11 has a variable that indicates its version already, so use that\n    message(STATUS \"Found pybind11 v${pybind11_VERSION}\")\nendif ()\n\n# Look for NumPy headers, except if NUMPY_INCLUDE_DIRS is passed,\n# which is required under some circumstances (such as wasm, where\n# there is no real python executable)\nif(NOT NUMPY_INCLUDE_DIRS)\n    find_package(NumPy REQUIRED)\nendif()\nmessage(STATUS \"Found numpy: ${NUMPY_INCLUDE_DIRS}\")\n\n# Build\n# =====\n\nset(XTENSOR_PYTHON_HEADERS\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyarray.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyarray_backstrides.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pycontainer.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pynative_casters.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pystrides_adaptor.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pytensor.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyvectorize.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp\n        ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_type_caster_base.hpp\n        )\n\nadd_library(xtensor-python 
INTERFACE)\ntarget_include_directories(xtensor-python INTERFACE\n    \"$<BUILD_INTERFACE:${XTENSOR_PYTHON_INCLUDE_DIR};${pybind11_INCLUDE_DIRS};${NUMPY_INCLUDE_DIRS}>\"\n    $<INSTALL_INTERFACE:include>)\ntarget_link_libraries(xtensor-python INTERFACE xtensor)\nget_target_property(inc_dir xtensor-python INTERFACE_INCLUDE_DIRECTORIES)\n\nOPTION(BUILD_TESTS \"xtensor test suite\" OFF)\nOPTION(DOWNLOAD_GTEST \"build gtest from downloaded sources\" OFF)\n\nif(DOWNLOAD_GTEST OR GTEST_SRC_DIR)\n    set(BUILD_TESTS ON)\nendif()\n\nif(BUILD_TESTS)\n    if(MSVC)\n        set(PYTHON_MODULE_EXTENSION \".pyd\")\n    else()\n        set(PYTHON_MODULE_EXTENSION \".so\")\n    endif()\n\n    add_subdirectory(test)\n    add_subdirectory(benchmark)\nendif()\n\n# Installation\n# ============\n\ninclude(GNUInstallDirs)\ninclude(CMakePackageConfigHelpers)\n\ninstall(TARGETS xtensor-python\n        EXPORT ${PROJECT_NAME}-targets)\n\n# Makes the project importable from the build directory\nexport(EXPORT ${PROJECT_NAME}-targets\n       FILE \"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake\")\n\ninstall(FILES ${XTENSOR_PYTHON_HEADERS}\n        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/xtensor-python)\n\n\nconfigure_file(${PROJECT_NAME}.pc.in\n               \"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc\"\n                @ONLY)\ninstall(FILES \"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc\"\n        DESTINATION \"${CMAKE_INSTALL_DATADIR}/pkgconfig/\")\n\nset(XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR \"${CMAKE_INSTALL_DATADIR}/cmake/${PROJECT_NAME}\" CACHE\n    STRING \"install path for xtensor-pythonConfig.cmake\")\n\nconfigure_package_config_file(${PROJECT_NAME}Config.cmake.in\n                              \"${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\"\n                              INSTALL_DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})\n\n# xtensor-python is header-only and does not depend on the architecture.\n# Remove CMAKE_SIZEOF_VOID_P from 
xtensor-pythonConfigVersion.cmake so that an xtensor-pythonConfig.cmake\n# generated for a 64 bit target can be used for 32 bit targets and vice versa.\nset(_XTENSOR_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})\nunset(CMAKE_SIZEOF_VOID_P)\nwrite_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n                                 VERSION ${${PROJECT_NAME}_VERSION}\n                                 COMPATIBILITY AnyNewerVersion)\nset(CMAKE_SIZEOF_VOID_P ${_XTENSOR_CMAKE_SIZEOF_VOID_P})\ninstall(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\n              ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n        DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})\ninstall(EXPORT ${PROJECT_NAME}-targets\n        FILE ${PROJECT_NAME}Targets.cmake\n        DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})\n\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2016, Wolf Vollprecht, Johan Mabille and Sylvain Corlay\nCopyright (c) 2016, QuantStack\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "# ![xtensor-python](docs/source/xtensor-python.svg)\n\n[![GHA Linux](https://github.com/xtensor-stack/xtensor-python/actions/workflows/linux.yml/badge.svg)](https://github.com/xtensor-stack/xtensor-python/actions/workflows/linux.yml)\n[![GHA OSX](https://github.com/xtensor-stack/xtensor-python/actions/workflows/osx.yml/badge.svg)](https://github.com/xtensor-stack/xtensor-python/actions/workflows/osx.yml)\n[![GHA Windows](https://github.com/xtensor-stack/xtensor-python/actions/workflows/windows.yml/badge.svg)](https://github.com/xtensor-stack/xtensor-python/actions/workflows/windows.yml)\n[![Documentation](http://readthedocs.org/projects/xtensor-python/badge/?version=latest)](https://xtensor-python.readthedocs.io/en/latest/?badge=latest)\n[![Zulip](https://img.shields.io/badge/social_chat-zulip-blue.svg)](https://xtensor.zulipchat.com/#narrow/channel/539553-Ask-anything)\n\nPython bindings for the [xtensor](https://github.com/xtensor-stack/xtensor) C++ multi-dimensional array library.\n\n - `xtensor` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing.\n - `xtensor-python` enables inplace use of numpy arrays in C++ with all the benefits from `xtensor`\n\n     - C++ universal function and broadcasting\n     - STL - compliant APIs.\n     - A broad coverage of numpy APIs (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)).\n\nThe Python bindings for `xtensor` are based on the [pybind11](https://github.com/pybind/pybind11/) C++ library, which enables seamless interoperability between C++ and Python.\n\n## Installation\n\n`xtensor-python` is a header-only library. 
We provide a package for the mamba (or conda) package manager.\n\n```bash\nmamba install -c conda-forge xtensor-python\n```\n\n## Documentation\n\nTo get started with using `xtensor-python`, check out the full documentation\n\nhttp://xtensor-python.readthedocs.io/\n\n## Usage\n\nxtensor-python offers two container types wrapping numpy arrays inplace to provide an xtensor semantics\n\n - `pytensor`\n - `pyarray`.\n\nBoth containers enable the numpy-style APIs of xtensor (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)).\n\n - On the one hand, `pyarray` has a dynamic number of dimensions. Just like numpy arrays, it can be reshaped with a shape of a different length (and the new shape is reflected on the python side).\n\n - On the other hand `pytensor` has a compile time number of dimensions, specified with a template parameter. Shapes of `pytensor` instances are stack allocated, making `pytensor` a significantly faster expression than `pyarray`.\n\n### Example 1: Use an algorithm of the C++ standard library on a numpy array inplace.\n\n**C++ code**\n\n```cpp\n#include <numeric>                        // Standard library import for std::accumulate\n#include <pybind11/pybind11.h>            // Pybind11 import to define Python bindings\n#include <xtensor/core/xmath.hpp>              // xtensor import for the C++ universal functions\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyarray.hpp>     // Numpy bindings\n\ndouble sum_of_sines(xt::pyarray<double>& m)\n{\n    auto sines = xt::sin(m);  // sines does not actually hold values.\n    return std::accumulate(sines.begin(), sines.end(), 0.0);\n}\n\nPYBIND11_MODULE(xtensor_python_test, m)\n{\n    xt::import_numpy();\n    m.doc() = \"Test module for xtensor python bindings\";\n\n    m.def(\"sum_of_sines\", sum_of_sines, \"Sum the sines of the input values\");\n}\n```\n\n**Python Code**\n\n```python\nimport numpy as np\nimport xtensor_python_test as xt\n\nv = 
np.arange(15).reshape(3, 5)\ns = xt.sum_of_sines(v)\nprint(s)\n```\n\n**Outputs**\n\n```\n1.2853996391883833\n```\n\n**Working example**\n\nGet the working example here:\n\n*   [`CMakeLists.txt`](docs/source/examples/readme_example_1/CMakeLists.txt)\n*   [`main.cpp`](docs/source/examples/readme_example_1/main.cpp)\n*   [`example.py`](docs/source/examples/readme_example_1/example.py)\n\n### Example 2: Create a universal function from a C++ scalar function\n\n**C++ code**\n\n```cpp\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyvectorize.hpp>\n#include <numeric>\n#include <cmath>\n\nnamespace py = pybind11;\n\ndouble scalar_func(double i, double j)\n{\n    return std::sin(i) - std::cos(j);\n}\n\nPYBIND11_MODULE(xtensor_python_test, m)\n{\n    xt::import_numpy();\n    m.doc() = \"Test module for xtensor python bindings\";\n\n    m.def(\"vectorized_func\", xt::pyvectorize(scalar_func), \"\");\n}\n```\n\n**Python Code**\n\n```python\nimport numpy as np\nimport xtensor_python_test as xt\n\nx = np.arange(15).reshape(3, 5)\ny = [1, 2, 3, 4, 5]\nz = xt.vectorized_func(x, y)\nprint(z)\n```\n\n**Outputs**\n\n```\n[[-0.540302,  1.257618,  1.89929 ,  0.794764, -1.040465],\n [-1.499227,  0.136731,  1.646979,  1.643002,  0.128456],\n [-1.084323, -0.583843,  0.45342 ,  1.073811,  0.706945]]\n```\n\n## Installation\n\nWe provide a package for the conda package manager.\n\n```bash\nconda install -c conda-forge xtensor-python\n```\n\nThis will pull the dependencies to xtensor-python, that is `pybind11` and `xtensor`.\n\n## Project cookiecutter\n\nA template for a project making use of `xtensor-python` is available in the form of a cookiecutter [here](https://github.com/xtensor-stack/xtensor-python-cookiecutter).\n\nThis project is meant to help library authors get started with the xtensor python bindings.\n\nIt produces a project following the best practices for the packaging and distribution of Python extensions based on `xtensor-python`, 
including a `setup.py` file and a conda recipe.\n\n## Building and Running the Tests\n\nTesting `xtensor-python` requires `pytest`\n\n  ``` bash\n  py.test .\n  ```\n\nTo pick up changes in `xtensor-python` while rebuilding, delete the `build/` directory.\n\n## Building the HTML Documentation\n\n`xtensor-python`'s documentation is built with three tools\n\n - [doxygen](http://www.doxygen.org)\n - [sphinx](http://www.sphinx-doc.org)\n - [breathe](https://breathe.readthedocs.io)\n\nWhile doxygen must be installed separately, you can install breathe by typing\n\n```bash\npip install breathe\n```\n\nBreathe can also be installed with `conda`\n\n```bash\nconda install -c conda-forge breathe\n```\n\nFinally, build the documentation with\n\n```bash\nmake html\n```\n\nfrom the `docs` subdirectory.\n\n## Dependencies on `xtensor` and `pybind11`\n\n`xtensor-python` depends on the `xtensor` and `pybind11` libraries\n\n| `xtensor-python` | `xtensor` |  `pybind11`      |\n|------------------|-----------|------------------|\n| master           |  ^0.27.0  | >=2.6.1,<4       |\n| 0.29.0           |  ^0.27.0  | >=2.6.1,<4       |\n| 0.28.0           |  ^0.26.0  | >=2.6.1,<3       |\n| 0.27.0           |  ^0.25.0  | >=2.6.1,<3       |\n| 0.26.1           |  ^0.24.0  | ~2.4.3           |\n| 0.26.0           |  ^0.24.0  | ~2.4.3           |\n| 0.25.3           |  ^0.23.0  | ~2.4.3           |\n| 0.25.2           |  ^0.23.0  | ~2.4.3           |\n| 0.25.1           |  ^0.23.0  | ~2.4.3           |\n| 0.25.0           |  ^0.23.0  | ~2.4.3           |\n| 0.24.1           |  ^0.21.2  | ~2.4.3           |\n| 0.24.0           |  ^0.21.1  | ~2.4.3           |\n\nThese dependencies are automatically resolved when using the conda package manager.\n\n## License\n\nWe use a shared copyright model that enables all contributors to maintain the\ncopyright on their contributions.\n\nThis software is licensed under the BSD-3-Clause license. See the [LICENSE](LICENSE) file for details.\n"
  },
  {
    "path": "benchmark/CMakeLists.txt",
    "content": "############################################################################\n# Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\nif (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n    project(xtensor-python-benchmark)\n\n    find_package(xtensor-python REQUIRED CONFIG)\n    set(XTENSOR_PYTHON_INCLUDE_DIR ${xtensor-python_INCLUDE_DIRS})\nendif ()\n\nmessage(STATUS \"Forcing tests build type to Release\")\nset(CMAKE_BUILD_TYPE Release CACHE STRING \"Choose the type of build.\" FORCE)\n\ninclude(CheckCXXCompilerFlag)\n\nstring(TOUPPER \"${CMAKE_BUILD_TYPE}\" U_CMAKE_BUILD_TYPE)\n\nif (CMAKE_CXX_COMPILER_ID MATCHES \"Clang\" OR CMAKE_CXX_COMPILER_ID MATCHES \"GNU\" OR CMAKE_CXX_COMPILER_ID MATCHES \"Intel\")\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -march=native -Wunused-parameter -Wextra -Wreorder -Wconversion\")\n    CHECK_CXX_COMPILER_FLAG(\"-std=c++14\" HAS_CPP14_FLAG)\n\n    if (HAS_CPP14_FLAG)\n        set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++14\")\n    else()\n        message(FATAL_ERROR \"Unsupported compiler -- xtensor requires C++14 support!\")\n    endif()\n\n    # Enable link time optimization and set the default symbol\n    # visibility to hidden (very important to obtain small binaries)\n    if (NOT ${U_CMAKE_BUILD_TYPE} MATCHES DEBUG)\n        # Default symbol visibility\n        set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fvisibility=hidden\")\n\n        # Check for Link Time Optimization support\n        # (GCC/Clang)\n        CHECK_CXX_COMPILER_FLAG(\"-flto\" HAS_LTO_FLAG)\n        if (HAS_LTO_FLAG)\n        
    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -flto\")\n        endif()\n\n        # Intel equivalent to LTO is called IPO\n        if (CMAKE_CXX_COMPILER_ID MATCHES \"Intel\")\n            CHECK_CXX_COMPILER_FLAG(\"-ipo\" HAS_IPO_FLAG)\n            if (HAS_IPO_FLAG)\n                set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -ipo\")\n            endif()\n        endif()\n    endif()\nendif()\n\nif(MSVC)\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} /EHsc /MP /bigobj\")\n    set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO)\n    foreach(flag_var\n            CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE\n            CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)\n        string(REPLACE \"/MD\" \"-MT\" ${flag_var} \"${${flag_var}}\")\n    endforeach()\nendif()\n\nset(XTENSOR_PYTHON_BENCHMARK\n    main.cpp\n)\n\nset(XTENSOR_PYTHON_BENCHMARK_TARGET benchmark_xtensor_python)\nadd_library(${XTENSOR_PYTHON_BENCHMARK_TARGET} MODULE EXCLUDE_FROM_ALL\n            ${XTENSOR_PYTHON_BENCHMARK} ${XTENSOR_PYTHON_HEADERS})\n\nset_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES PREFIX \"\")\nset_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES SUFFIX \"${PYTHON_MODULE_EXTENSION}\")\n\nif (APPLE)\n    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} PRIVATE \"-undefined dynamic_lookup\")\nelseif (MSVC)\n    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} ${PYTHON_LIBRARIES})\nelse ()\n    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} PRIVATE xtensor-python)\nendif()\n\nconfigure_file(benchmark_pyarray.py benchmark_pyarray.py COPYONLY)\nconfigure_file(benchmark_pytensor.py benchmark_pytensor.py COPYONLY)\nconfigure_file(benchmark_pybind_array.py benchmark_pybind_array.py COPYONLY)\nconfigure_file(benchmark_pyvectorize.py benchmark_pyvectorize.py COPYONLY)\nconfigure_file(benchmark_pybind_vectorize.py benchmark_pybind_vectorize.py COPYONLY)\n\nadd_custom_target(xbenchmark\n        COMMAND 
\"${PYTHON_EXECUTABLE}\" \"benchmark_pyarray.py\"\n        COMMAND \"${PYTHON_EXECUTABLE}\" \"benchmark_pytensor.py\"\n        COMMAND \"${PYTHON_EXECUTABLE}\" \"benchmark_pybind_array.py\"\n        COMMAND \"${PYTHON_EXECUTABLE}\" \"benchmark_pyvectorize.py\"\n        COMMAND \"${PYTHON_EXECUTABLE}\" \"benchmark_pybind_vectorize.py\"\n        DEPENDS ${XTENSOR_PYTHON_BENCHMARK_TARGET})\n\n"
  },
  {
    "path": "benchmark/benchmark_pyarray.py",
    "content": "from benchmark_xtensor_python import sum_array\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\nfrom timeit import timeit\nprint (timeit ('sum_array(u)', setup='from __main__ import u, sum_array', number=1000))\n"
  },
  {
    "path": "benchmark/benchmark_pybind_array.py",
    "content": "from benchmark_xtensor_python import pybind_sum_array\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\nfrom timeit import timeit\nprint (timeit ('pybind_sum_array(u)', setup='from __main__ import u, pybind_sum_array', number=1000))\n"
  },
  {
    "path": "benchmark/benchmark_pybind_vectorize.py",
    "content": "from benchmark_xtensor_python import pybind_rect_to_polar\nimport numpy as np\n\nfrom timeit import timeit\nw = np.ones(100000, dtype=complex)\nprint (timeit('pybind_rect_to_polar(w[::2])', 'from __main__ import w, pybind_rect_to_polar', number=1000))\n"
  },
  {
    "path": "benchmark/benchmark_pytensor.py",
    "content": "from benchmark_xtensor_python import sum_tensor\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\n#print(sum_tensor(u))\nfrom timeit import timeit\nprint (timeit ('sum_tensor(u)', setup='from __main__ import u, sum_tensor', number=1000))\n"
  },
  {
    "path": "benchmark/benchmark_pyvectorize.py",
    "content": "from benchmark_xtensor_python import rect_to_polar\nimport numpy as np\n\nfrom timeit import timeit\nw = np.ones(100000, dtype=complex)\nprint (timeit('rect_to_polar(w[::2])', 'from __main__ import w, rect_to_polar', number=1000))\n"
  },
  {
    "path": "benchmark/main.cpp",
    "content": "#include \"pybind11/pybind11.h\"\n#include \"pybind11/numpy.h\"\n#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n#include \"numpy/arrayobject.h\"\n#include \"xtensor/containers/xtensor.hpp\"\n#include \"xtensor/containers/xarray.hpp\"\n#include \"xtensor-python/pyarray.hpp\"\n#include \"xtensor-python/pytensor.hpp\"\n#include \"xtensor-python/pyvectorize.hpp\"\n\nusing complex_t = std::complex<double>;\n\nnamespace py = pybind11;\n\nPYBIND11_MODULE(benchmark_xtensor_python, m)\n{\n    if(_import_array() < 0)\n    {\n        PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\");\n    }\n\n    m.doc() = \"Benchmark module for xtensor python bindings\";\n\n    m.def(\"sum_array\", [](xt::pyarray<double> const& x) {\n        double sum = 0;\n        for(auto e : x)\n            sum += e;\n        return sum;\n    });\n\n    m.def(\"sum_tensor\", [](xt::pytensor<double, 1> const& x) {\n        double sum = 0;\n        for(auto e : x)\n            sum += e;\n        return sum;\n    });\n\n    m.def(\"pybind_sum_array\", [](py::array_t<double> const& x) {\n        double sum = 0;\n        size_t size = x.size();\n        const double* data = x.data(0);\n        for(size_t i = 0; i < size; ++i)\n           sum += data[i];\n        return sum;\n    });\n\n    m.def(\"rect_to_polar\", [](xt::pyarray<complex_t> const& a) {\n        return py::vectorize([](complex_t x) { return std::abs(x); })(a);\n    });\n\n    m.def(\"pybind_rect_to_polar\", [](py::array a) {\n        if (py::isinstance<py::array_t<complex_t>>(a))\n            return py::vectorize([](complex_t x) { return std::abs(x); })(a);\n        else\n            throw py::type_error(\"rect_to_polar unhandled type\");\n    });\n}\n"
  },
  {
    "path": "benchmark/setup.py",
    "content": "from setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport sys\nimport os\nimport setuptools\n\n__version__ = '0.0.1'\n\n\nclass get_pybind_include(object):\n    \"\"\"Helper class to determine the pybind11 include path\n\n    The purpose of this class is to postpone importing pybind11\n    until it is actually installed, so that the ``get_include()``\n    method can be invoked. \"\"\"\n\n    def __init__(self, user=False):\n        self.user = user\n\n    def __str__(self):\n        import pybind11\n        return pybind11.get_include(self.user)\n\nclass get_numpy_include(object):\n    \"\"\"Helper class to determine the numpy include path\n\n    The purpose of this class is to postpone importing numpy\n    until it is actually installed, so that the ``get_include()``\n    method can be invoked. \"\"\"\n\n    def __str__(self):\n        import numpy\n        return numpy.get_include()\n\next_modules = [\n    Extension(\n        'benchmark_xtensor_python',\n        ['main.cpp'],\n        include_dirs=[\n            # Path to pybind11 headers\n            get_pybind_include(),\n            get_pybind_include(user=True),\n            # Path to numpy headers\n            get_numpy_include(),\n            os.path.join(sys.prefix, 'include'),\n            os.path.join(sys.prefix, 'Library', 'include')\n        ],\n        language='c++'\n    ),\n]\n\n\ndef has_flag(compiler, flagname):\n    \"\"\"Return a boolean indicating whether a flag name is supported on\n    the specified compiler.\n    \"\"\"\n    import tempfile\n    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\n        f.write('int main (int argc, char **argv) { return 0; }')\n        try:\n            compiler.compile([f.name], extra_postargs=[flagname])\n        except setuptools.distutils.errors.CompileError:\n            return False\n    return True\n\n\ndef cpp_flag(compiler):\n    \"\"\"Return the -std=c++14 compiler flag  and errors 
when the flag is\n    not available.\n    \"\"\"\n    if has_flag(compiler, '-std=c++14'):\n        return '-std=c++14'\n    else:\n        raise RuntimeError('C++14 support is required by xtensor!')\n\n\nclass BuildExt(build_ext):\n    \"\"\"A custom build extension for adding compiler-specific options.\"\"\"\n    c_opts = {\n        'msvc': ['/EHsc'],\n        'unix': [],\n    }\n\n    if sys.platform == 'darwin':\n        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']\n\n    def build_extensions(self):\n        ct = self.compiler.compiler_type\n        opts = self.c_opts.get(ct, [])\n        if ct == 'unix':\n            opts.append('-DVERSION_INFO=\"%s\"' % self.distribution.get_version())\n            opts.append(cpp_flag(self.compiler))\n            if has_flag(self.compiler, '-fvisibility=hidden'):\n                opts.append('-fvisibility=hidden')\n        elif ct == 'msvc':\n            opts.append('/DVERSION_INFO=\\\\\"%s\\\\\"' % self.distribution.get_version())\n        for ext in self.extensions:\n            ext.extra_compile_args = opts\n        build_ext.build_extensions(self)\n\nsetup(\n    name='benchmark_xtensor_python',\n    version=__version__,\n    author='Sylvain Corlay',\n    author_email='sylvain.corlay@gmail.com',\n    url='https://github.com/pybind/python_example',\n    description='An example project using xtensor-python',\n    long_description='',\n    ext_modules=ext_modules,\n    install_requires=['pybind11>=2.2.1'],\n    cmdclass={'build_ext': BuildExt},\n    zip_safe=False,\n)\n"
  },
  {
    "path": "cmake/FindNumPy.cmake",
    "content": "# - Find the NumPy libraries\n# This module finds if NumPy is installed, and sets the following variables\n# indicating where it is.\n#\n# TODO: Update to provide the libraries and paths for linking npymath lib.\n#\n#  NUMPY_FOUND               - was NumPy found\n#  NUMPY_VERSION             - the version of NumPy found as a string\n#  NUMPY_VERSION_MAJOR       - the major version number of NumPy\n#  NUMPY_VERSION_MINOR       - the minor version number of NumPy\n#  NUMPY_VERSION_PATCH       - the patch version number of NumPy\n#  NUMPY_VERSION_DECIMAL     - e.g. version 1.6.1 is 10601\n#  NUMPY_INCLUDE_DIRS        - path to the NumPy include files\n\n#============================================================================\n# Copyright 2012 Continuum Analytics, Inc.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n# \n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n# \n#============================================================================\n\n# Finding NumPy involves calling the Python interpreter\nif(NumPy_FIND_REQUIRED)\n    find_package(Python COMPONENTS Interpreter REQUIRED)\nelse()\n    find_package(Python COMPONENTS Interpreter)\nendif()\n\nif(NOT PYTHONINTERP_FOUND)\n    set(NUMPY_FOUND FALSE)\nendif()\n\nexecute_process(COMMAND \"${PYTHON_EXECUTABLE}\" \"-c\"\n    \"import numpy as n; print(n.__version__); print(n.get_include());\"\n    RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS\n    OUTPUT_VARIABLE _NUMPY_VALUES\n    ERROR_VARIABLE _NUMPY_ERROR_VALUE\n    OUTPUT_STRIP_TRAILING_WHITESPACE)\n\nif(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0)\n    if(NumPy_FIND_REQUIRED)\n        message(FATAL_ERROR\n            \"NumPy import failure:\\n${_NUMPY_ERROR_VALUE}\")\n    endif()\n    set(NUMPY_FOUND FALSE)\nendif()\n\n# Convert the process output into a list\nstring(REGEX REPLACE \";\" \"\\\\\\\\;\" _NUMPY_VALUES ${_NUMPY_VALUES})\nstring(REGEX REPLACE \"\\n\" \";\" _NUMPY_VALUES ${_NUMPY_VALUES})\nlist(GET _NUMPY_VALUES 0 NUMPY_VERSION)\nlist(GET _NUMPY_VALUES 1 NUMPY_INCLUDE_DIRS)\n\n# Make sure all directory separators are '/'\nstring(REGEX REPLACE \"\\\\\\\\\" \"/\" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS})\n\n# Get the major and minor version numbers\nstring(REGEX REPLACE \"\\\\.\" \";\" _NUMPY_VERSION_LIST ${NUMPY_VERSION})\nlist(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR)\nlist(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR)\nlist(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH)\nstring(REGEX MATCH \"[0-9]*\" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH})\nmath(EXPR NUMPY_VERSION_DECIMAL\n    \"(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + 
${NUMPY_VERSION_PATCH}\")\n\nfind_package_message(NUMPY\n    \"Found NumPy: version \\\"${NUMPY_VERSION}\\\" ${NUMPY_INCLUDE_DIRS}\"\n    \"${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}\")\n\nset(NUMPY_FOUND TRUE)\n"
  },
  {
    "path": "docs/Doxyfile",
    "content": "PROJECT_NAME      = \"xtensor-python\"\nXML_OUTPUT        = xml\nINPUT             = ../include\nGENERATE_LATEX    = NO\nGENERATE_MAN      = NO\nGENERATE_RTF      = NO\nCASE_SENSE_NAMES  = NO\nGENERATE_HTML     = NO\nGENERATE_XML      = YES\nRECURSIVE         = YES\nQUIET             = YES\nJAVADOC_AUTOBRIEF = YES\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = build\n\n# User-friendly check for sphinx-build\nifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)\n$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)\nendif\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext api\n\ndefault: html\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  applehelp  to make an Apple Help Book\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run 
them through platex/dvipdfmx\"\n\t@echo \"  text       to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage   to run coverage check of the documentation (if enabled)\"\n\nclean:\n\trm -rf $(BUILDDIR)/*\n\trm -rf xml\n\nhtml:\n\tdoxygen\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\ndirhtml:\n\tdoxygen\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\tdoxygen\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\tdoxygen\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\tdoxygen\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\tdoxygen\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nepub:\n\tdoxygen\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. 
The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\tdoxygen\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\nlatexpdf:\n\tdoxygen\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\nlatexpdfja:\n\tdoxygen\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\tdoxygen\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\tdoxygen\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\ntexinfo:\n\tdoxygen\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\ninfo:\n\tdoxygen\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\ngettext:\n\tdoxygen\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. 
The message catalogs are in $(BUILDDIR)/locale.\"\n\nchanges:\n\tdoxygen\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\tdoxygen\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\tdoxygen\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\ncoverage:\n\tdoxygen\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\nxml:\n\tdoxygen\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\npseudoxml:\n\tdoxygen\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n"
  },
  {
    "path": "docs/environment.yml",
    "content": "name: xtensor-python-docs\n\nchannels:\n  - conda-forge\n\ndependencies:\n  - breathe\n  - sphinx_rtd_theme\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\nset BUILDDIR=build\r\nset ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source\r\nset I18NSPHINXOPTS=%SPHINXOPTS% source\r\nif NOT \"%PAPER%\" == \"\" (\r\n\tset ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%\r\n\tset I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%\r\n)\r\n\r\nif \"%1\" == \"\" goto help\r\n\r\nif \"%1\" == \"help\" (\r\n\t:help\r\n\techo.Please use `make ^<target^>` where ^<target^> is one of\r\n\techo.  html       to make standalone HTML files\r\n\techo.  dirhtml    to make HTML files named index.html in directories\r\n\techo.  singlehtml to make a single large HTML file\r\n\techo.  pickle     to make pickle files\r\n\techo.  json       to make JSON files\r\n\techo.  htmlhelp   to make HTML files and a HTML help project\r\n\techo.  qthelp     to make HTML files and a qthelp project\r\n\techo.  devhelp    to make HTML files and a Devhelp project\r\n\techo.  epub       to make an epub\r\n\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\r\n\techo.  text       to make text files\r\n\techo.  man        to make manual pages\r\n\techo.  texinfo    to make Texinfo files\r\n\techo.  gettext    to make PO message catalogs\r\n\techo.  changes    to make an overview over all changed/added/deprecated items\r\n\techo.  xml        to make Docutils-native XML files\r\n\techo.  pseudoxml  to make pseudoxml-XML files for display purposes\r\n\techo.  linkcheck  to check all external links for integrity\r\n\techo.  doctest    to run all doctests embedded in the documentation if enabled\r\n\techo.  
coverage   to run coverage check of the documentation if enabled\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"clean\" (\r\n\tfor /d %%i in (%BUILDDIR%\\*) do rmdir /q /s %%i\r\n\tdel /q /s %BUILDDIR%\\*\r\n\tgoto end\r\n)\r\n\r\n\r\nREM Check if sphinx-build is available and fallback to Python version if any\r\n%SPHINXBUILD% 1>NUL 2>NUL\r\nif errorlevel 9009 goto sphinx_python\r\ngoto sphinx_ok\r\n\r\n:sphinx_python\r\n\r\nset SPHINXBUILD=python -m sphinx.__init__\r\n%SPHINXBUILD% 2> nul\r\nif errorlevel 9009 (\r\n\techo.\r\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\r\n\techo.installed, then set the SPHINXBUILD environment variable to point\r\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\r\n\techo.may add the Sphinx directory to PATH.\r\n\techo.\r\n\techo.If you don't have Sphinx installed, grab it from\r\n\techo.http://sphinx-doc.org/\r\n\texit /b 1\r\n)\r\n\r\n:sphinx_ok\r\n\r\n\r\nif \"%1\" == \"html\" (\r\n        doxygen\r\n\t%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/html.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"dirhtml\" (\r\n\t%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"singlehtml\" (\r\n\t%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The HTML pages are in %BUILDDIR%/singlehtml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pickle\" (\r\n\t%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the pickle files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"json\" (\r\n\t%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can process the JSON files.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"htmlhelp\" (\r\n\t%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run HTML Help Workshop with the ^\r\n.hhp project file in %BUILDDIR%/htmlhelp.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"qthelp\" (\r\n\t%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; now you can run \"qcollectiongenerator\" with the ^\r\n.qhcp project file in %BUILDDIR%/qthelp, like this:\r\n\techo.^> qcollectiongenerator %BUILDDIR%\\qthelp\\packagename.qhcp\r\n\techo.To view the help file:\r\n\techo.^> assistant -collectionFile %BUILDDIR%\\qthelp\\packagename.ghc\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"devhelp\" (\r\n\t%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"epub\" (\r\n\t%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The epub file is in %BUILDDIR%/epub.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latex\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished; the LaTeX files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdf\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf\r\n\tcd %~dp0\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"latexpdfja\" (\r\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\r\n\tcd %BUILDDIR%/latex\r\n\tmake all-pdf-ja\r\n\tcd %~dp0\r\n\techo.\r\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"text\" (\r\n\t%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The text files are in %BUILDDIR%/text.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"man\" (\r\n\t%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The manual pages are in %BUILDDIR%/man.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"texinfo\" (\r\n\t%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"gettext\" (\r\n\t%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. 
The message catalogs are in %BUILDDIR%/locale.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"changes\" (\r\n\t%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.The overview file is in %BUILDDIR%/changes.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"linkcheck\" (\r\n\t%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Link check complete; look for any errors in the above output ^\r\nor in %BUILDDIR%/linkcheck/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"doctest\" (\r\n\t%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Testing of doctests in the sources finished, look at the ^\r\nresults in %BUILDDIR%/doctest/output.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"coverage\" (\r\n\t%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Testing of coverage in the sources finished, look at the ^\r\nresults in %BUILDDIR%/coverage/python.txt.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"xml\" (\r\n\t%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The XML files are in %BUILDDIR%/xml.\r\n\tgoto end\r\n)\r\n\r\nif \"%1\" == \"pseudoxml\" (\r\n\t%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml\r\n\tif errorlevel 1 exit /b 1\r\n\techo.\r\n\techo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.\r\n\tgoto end\r\n)\r\n\r\n:end\r\n"
  },
  {
    "path": "docs/source/_static/main_stylesheet.css",
    "content": ".wy-nav-content{\n    max-width: 1000px;\n    margin: auto;\n}\n"
  },
  {
    "path": "docs/source/api_reference.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nAPI reference\n=============\n\nContainers\n----------\n\n.. toctree::\n   :maxdepth: 2\n\n   pyarray\n   pytensor\n\nNumpy universal functions\n-------------------------\n\n.. toctree::\n   :maxdepth: 2\n\n   pyvectorize\n"
  },
  {
    "path": "docs/source/array_tensor.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nArrays and tensors\n==================\n\n``xtensor-python`` provides two container types wrapping numpy arrays: ``pyarray`` and ``pytensor``. They are the counterparts\nto ``xarray`` and ``xtensor`` containers.\n\npyarray\n-------\n\nLike ``xarray``, ``pyarray`` has a dynamic shape. This means that you can reshape the numpy array on the C++ side and see this\nchange reflected on the python side. ``pyarray`` doesn't make a copy of the shape or the strides, but reads them each time it\nis needed. Therefore, if a reference on a ``pyarray`` is kept in the C++ code and the corresponding numpy array is then reshaped\nin the python code, this modification will reflect in the ``pyarray``.\n\npytensor\n--------\n\nLike ``xtensor``, ``pytensor`` has a static stack-allocated shape. This means that the shape of the numpy array is copied into\nthe shape of the ``pytensor`` upon creation. As a consequence, reshapes are not reflected across languages. However, this drawback\nis offset by a more effective computation of shape and broadcast.\n\n"
  },
  {
    "path": "docs/source/basic_usage.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nBasic Usage\n===========\n\nExample 1: Use an algorithm of the C++ library on a numpy array inplace\n-----------------------------------------------------------------------\n\n**C++ code**\n\n.. code::\n\n    #include <numeric>                        // Standard library import for std::accumulate\n    #include \"pybind11/pybind11.h\"            // Pybind11 import to define Python bindings\n    #include \"xtensor/core/xmath.hpp\"              // xtensor import for the C++ universal functions\n    #define FORCE_IMPORT_ARRAY                // numpy C api loading\n    #include \"xtensor-python/pyarray.hpp\"     // Numpy bindings\n\n    double sum_of_sines(xt::pyarray<double>& m)\n    {\n        auto sines = xt::sin(m);  // sines does not actually hold values.\n        return std::accumulate(sines.cbegin(), sines.cend(), 0.0);\n    }\n\n    PYBIND11_MODULE(xtensor_python_test, m)\n    {\n        xt::import_numpy();\n        m.doc() = \"Test module for xtensor python bindings\";\n\n        m.def(\"sum_of_sines\", sum_of_sines, \"Sum the sines of the input values\");\n    }\n\n**Python code:**\n\n.. code::\n\n    import numpy as np\n    import xtensor_python_test as xt\n\n    a = np.arange(15).reshape(3, 5)\n    s = xt.sum_of_sines(a)\n    s\n\n**Outputs**\n\n.. code::\n\n    1.2853996391883833\n\n\nExample 2: Create a numpy-style universal function from a C++ scalar function\n-----------------------------------------------------------------------------\n\n**C++ code**\n\n.. 
code::\n\n    #include \"pybind11/pybind11.h\"\n    #define FORCE_IMPORT_ARRAY\n    #include \"xtensor-python/pyvectorize.hpp\"\n    #include <numeric>\n    #include <cmath>\n\n    namespace py = pybind11;\n\n    double scalar_func(double i, double j)\n    {\n        return std::sin(i) - std::cos(j);\n    }\n\n    PYBIND11_MODULE(xtensor_python_test, m)\n    {\n        xt::import_numpy();\n        m.doc() = \"Test module for xtensor python bindings\";\n\n        m.def(\"vectorized_func\", xt::pyvectorize(scalar_func), \"\");\n    }\n\n**Python code:**\n\n.. code::\n\n    import numpy as np\n    import xtensor_python_test as xt\n\n    x = np.arange(15).reshape(3, 5)\n    y = [1, 2, 3, 4, 5]\n    z = xt.vectorized_func(x, y)\n    z\n\n**Outputs**\n\n.. code::\n\n    [[-0.540302,  1.257618,  1.89929 ,  0.794764, -1.040465],\n     [-1.499227,  0.136731,  1.646979,  1.643002,  0.128456],\n     [-1.084323, -0.583843,  0.45342 ,  1.073811,  0.706945]]\n\n"
  },
  {
    "path": "docs/source/compilers.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille, Sylvain Corlay and Wolf Vollprecht\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nCompiler workarounds\n====================\n\nThis page tracks the workarounds for the various compiler issues that we\nencountered in the development. This is mostly of interest for developers\ninterested in contributing to xtensor-python.\n\nGCC and ``std::allocator<long long>``\n-------------------------------------\n\nGCC sometimes fails to automatically instantiate the ``std::allocator``\nclass template for the types ``long long`` and ``unsigned long long``.\nThose allocators are thus explicitly instantiated in the dummy function\n``void long_long_allocator()`` in the file ``py_container.hpp``.\n"
  },
  {
    "path": "docs/source/conf.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n    subprocess.call('cd ..; doxygen', shell=True)\n\nimport sphinx_rtd_theme\n\nhtml_theme = \"sphinx_rtd_theme\"\n\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\ndef setup(app):\n    app.add_css_file(\"main_stylesheet.css\")\n\nextensions = ['breathe', 'sphinx_rtd_theme']\nbreathe_projects = { 'xtensor-python': '../xml' }\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'xtensor-python'\ncopyright = '2016, Johan Mabille and Sylvain Corlay'\nauthor = 'Johan Mabille and Sylvain Corlay'\n\nhtml_logo = 'quantstack-white.svg'\n\nexclude_patterns = []\nhighlight_language = 'c++'\npygments_style = 'sphinx'\ntodo_include_todos = False\nhtmlhelp_basename = 'xtensorpythondoc'\n\n"
  },
  {
    "path": "docs/source/cookiecutter.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nGetting started with xtensor-python-cookiecutter\n================================================\n\n`xtensor-python-cookiecutter`_ helps extension authors create Python extension modules making use of xtensor.\n\nIt takes care of the initial work of generating a project skeleton with\n\n- A complete ``setup.py`` compiling the extension module\n- A few examples included in the resulting project including\n\n    - A universal function defined from C++\n    - A function making use of an algorithm from the STL on a numpy array\n    - Unit tests\n    - The generation of the HTML documentation with sphinx\n\nUsage\n-----\n\nInstall cookiecutter_\n\n.. code::\n\n    pip install cookiecutter\n\nAfter installing cookiecutter, use the `xtensor-python-cookiecutter`_:\n\n.. code::\n\n    cookiecutter https://github.com/xtensor-stack/xtensor-python-cookiecutter.git\n\nAs xtensor-python-cookiecutter runs, you will be asked for basic information about\nyour custom extension project. 
You will be prompted for the following\ninformation:\n\n- ``author_name``: your name or the name of your organization,\n- ``author_email``: your project's contact email,\n- ``github_project_name``: name of the GitHub repository for your project,\n- ``github_organization_name``: name of the GitHub organization for your project,\n- ``python_package_name``: name of the Python package created by your extension,\n- ``cpp_namespace``: name for the cpp namespace holding the implementation of your extension,\n- ``project_short_description``: a short description for your project.\n  \nThis will produce a directory containing all the required content for a minimal extension\nproject making use of xtensor with all the required boilerplate for package management,\ntogether with a few basic examples.\n\n.. _xtensor-python-cookiecutter: https://github.com/xtensor-stack/xtensor-python-cookiecutter\n\n.. _cookiecutter: https://github.com/audreyr/cookiecutter\n"
  },
  {
    "path": "docs/source/dev_build_options.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\n\nBuild, test and benchmark\n=========================\n\n``xtensor-python`` build supports the following options:\n\n- ``BUILD_TESTS``: enables the ``xtest`` and ``xbenchmark`` targets (see below).\n- ``DOWNLOAD_GTEST``: downloads ``gtest`` and builds it locally instead of using a binary installation.\n- ``GTEST_SRC_DIR``: indicates where to find the ``gtest`` sources instead of downloading them.\n\nAll these options are disabled by default. Enabling ``DOWNLOAD_GTEST`` or\nsetting ``GTEST_SRC_DIR`` enables ``BUILD_TESTS``.\n\nIf the ``BUILD_TESTS`` option is enabled, the following targets are available:\n\n- xtest: builds and runs the test suite.\n- xbenchmark: builds and runs the benchmarks.\n\nFor instance, building the test suite of ``xtensor-python`` and downloading ``gtest`` automatically:\n\n.. code::\n\n    mkdir build\n    cd build\n    cmake -DDOWNLOAD_GTEST=ON ../\n    make xtest\n\nTo run the benchmark:\n\n.. code::\n\n    make xbenchmark\n\nTo test the Python bindings:\n\n.. code::\n\n    cd ..\n    pytest -s\n"
  },
  {
    "path": "docs/source/examples/copy_cast/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(pybind11 CONFIG REQUIRED)\nfind_package(xtensor REQUIRED)\nfind_package(xtensor-python REQUIRED)\nfind_package(Python REQUIRED COMPONENTS NumPy)\n\npybind11_add_module(mymodule main.cpp)\ntarget_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)\n\ntarget_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)\n"
  },
  {
    "path": "docs/source/examples/copy_cast/example.py",
    "content": "import mymodule\nimport numpy as np\n\nc = np.array([[1, 2, 3], [4, 5, 6]])\nassert np.isclose(np.sum(np.sin(c)), mymodule.sum_of_sines(c))\nassert np.isclose(np.sum(np.cos(c)), mymodule.sum_of_cosines(c))\n"
  },
  {
    "path": "docs/source/examples/copy_cast/main.cpp",
    "content": "#include <numeric>\n#include <xtensor.hpp>\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyarray.hpp>\n\ntemplate <class T>\ndouble sum_of_sines(T& m)\n{\n    auto sines = xt::sin(m);  // sines does not actually hold values.\n    return std::accumulate(sines.begin(), sines.end(), 0.0);\n}\n\n// In the Python API this a reference to a temporary variable\ndouble sum_of_cosines(const xt::xarray<double>& m)\n{\n    auto cosines = xt::cos(m);  // cosines does not actually hold values.\n    return std::accumulate(cosines.begin(), cosines.end(), 0.0);\n}\n\nPYBIND11_MODULE(mymodule, m)\n{\n    xt::import_numpy();\n    m.doc() = \"Test module for xtensor python bindings\";\n    m.def(\"sum_of_sines\", sum_of_sines<xt::pyarray<double>>, \"Sum the sines of the input values\");\n    m.def(\"sum_of_cosines\", sum_of_cosines, \"Sum the cosines of the input values\");\n}\n"
  },
  {
    "path": "docs/source/examples/readme_example_1/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)\nfind_package(pybind11 REQUIRED CONFIG)\nfind_package(xtensor REQUIRED)\nfind_package(xtensor-python REQUIRED)\n\npybind11_add_module(mymodule main.cpp)\ntarget_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)\n\ntarget_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)\n"
  },
  {
    "path": "docs/source/examples/readme_example_1/example.py",
    "content": "import mymodule\nimport numpy as np\n\na = np.array([1, 2, 3])\nassert np.isclose(np.sum(np.sin(a)), mymodule.sum_of_sines(a))\n\n"
  },
  {
    "path": "docs/source/examples/readme_example_1/main.cpp",
    "content": "#include <numeric>\n#include <xtensor.hpp>\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyarray.hpp>\n\ndouble sum_of_sines(xt::pyarray<double>& m)\n{\n    auto sines = xt::sin(m);  // sines does not actually hold values.\n    return std::accumulate(sines.begin(), sines.end(), 0.0);\n}\n\nPYBIND11_MODULE(mymodule, m)\n{\n    xt::import_numpy();\n    m.doc() = \"Test module for xtensor python bindings\";\n    m.def(\"sum_of_sines\", sum_of_sines, \"Sum the sines of the input values\");\n}\n"
  },
  {
    "path": "docs/source/examples/sfinae/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)\nfind_package(pybind11 REQUIRED CONFIG)\nfind_package(xtensor REQUIRED)\nfind_package(xtensor-python REQUIRED)\n\npybind11_add_module(mymodule python.cpp)\ntarget_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)\n\ntarget_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)\n\nadd_executable(myexec main.cpp)\ntarget_link_libraries(myexec PUBLIC xtensor)\n"
  },
  {
    "path": "docs/source/examples/sfinae/example.py",
    "content": "import mymodule\nimport numpy as np\n\na = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)\nb = np.array(a, copy=True)\nmymodule.times_dimension(b) # changing in-place!\nassert np.allclose(2 * a, b)\n\n"
  },
  {
    "path": "docs/source/examples/sfinae/main.cpp",
    "content": "#include \"mymodule.hpp\"\n#include <xtensor/io/xio.hpp>\n\nint main()\n{\n    xt::xtensor<size_t, 2> a = xt::arange<size_t>(2 * 3).reshape({2, 3});\n    mymodule::times_dimension(a);\n    std::cout << a << std::endl;\n    return 0;\n}\n"
  },
  {
    "path": "docs/source/examples/sfinae/mymodule.hpp",
    "content": "#include <xtensor/containers/xtensor.hpp>\n\nnamespace mymodule {\n\ntemplate <class T>\nstruct is_std_vector\n{\n    static const bool value = false;\n};\n\ntemplate <class T>\nstruct is_std_vector<std::vector<T> >\n{\n    static const bool value = true;\n};\n\n// any xtensor object\ntemplate <class T, std::enable_if_t<xt::is_xexpression<T>::value, bool> = true>\nvoid times_dimension(T& t)\n{\n    using value_type = typename T::value_type;\n    t *= (value_type)(t.dimension());\n}\n\n// an std::vector\ntemplate <class T, std::enable_if_t<is_std_vector<T>::value, bool> = true>\nvoid times_dimension(T& t)\n{\n    // do nothing\n}\n\n}\n"
  },
  {
    "path": "docs/source/examples/sfinae/python.cpp",
    "content": "#include \"mymodule.hpp\"\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyarray.hpp>\n\nPYBIND11_MODULE(mymodule, m)\n{\n    xt::import_numpy();\n    m.doc() = \"Test module for xtensor python bindings\";\n    m.def(\"times_dimension\", &mymodule::times_dimension<xt::pyarray<double>>);\n}\n"
  },
  {
    "path": "docs/source/examples.rst",
    "content": "\n****************\n(CMake) Examples\n****************\n\nBasic example (from readme)\n===========================\n\nConsider the following C++ code:\n\n:download:`main.cpp <examples/readme_example_1/main.cpp>`\n\n.. literalinclude:: examples/readme_example_1/main.cpp\n   :language: cpp\n\nThere are several options to build the module,\nwhereby we will use *CMake* here with the following ``CMakeLists.txt``:\n\n:download:`CMakeLists.txt <examples/readme_example_1/CMakeLists.txt>`\n\n.. literalinclude:: examples/readme_example_1/CMakeLists.txt\n   :language: cmake\n\n.. tip::\n\n    There is a potential pitfall here, centered around the fact that *CMake*\n    has a 'new' *FindPython* and a 'classic' *FindPythonLibs*.\n    We here use *FindPython* because of its ability to find the NumPy headers,\n    that we need for *xtensor-python*.\n\n    This has the consequence that when we want to force *CMake*\n    to use a specific *Python* executable, we have to use something like\n\n    .. code-block:: none\n\n        cmake -Bbuild -DPython_EXECUTABLE=`which python`\n\n    whereby it is crucial that one uses the correct case ``Python_EXECUTABLE``, as:\n\n    .. code-block:: none\n\n        Python_EXECUTABLE   <->   FindPython\n        PYTHON_EXECUTABLE   <->   FindPythonLibs\n\n    (remember that *CMake* is **case-sensitive**!).\n\n    Now, since we use *FindPython* because of *xtensor-python* we also want *pybind11*\n    to use *FindPython*\n    (and not the classic *FindPythonLibs*,\n    since we want to specify the *Python* executable only once).\n    To this end we have to make sure to do things in the correct order, which is\n\n    .. code-block:: cmake\n\n        find_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)\n        find_package(pybind11 REQUIRED CONFIG)\n\n    (i.e. 
one finds *Python* **before** *pybind11*).\n    See the `pybind11 documentation <https://pybind11.readthedocs.io/en/latest/cmake/index.html#new-findpython-mode>`_.\n\n    In addition, be sure to use a quite recent *CMake* version,\n    by starting your ``CMakeLists.txt`` for example with\n\n    .. code-block:: cmake\n\n        cmake_minimum_required(VERSION 3.18..3.20)\n\nThen we can test the module:\n\n:download:`example.py <examples/readme_example_1/example.py>`\n\n.. literalinclude:: examples/readme_example_1/example.py\n   :language: python\n\n.. note::\n\n    Since we did not install the module,\n    we should compile and run the example from the same folder.\n    To install, please consult\n    `this *pybind11* / *CMake* example <https://github.com/pybind/cmake_example>`_.\n\n\nType restriction with SFINAE\n============================\n\n.. seealso::\n\n    `Medium post by Johan Mabille <https://medium.com/@johan.mabille/designing-language-bindings-with-xtensor-f32aa0f20db>`__\n    This example covers \"Option 4\".\n\nIn this example we will design a module with a function that accepts an ``xt::xtensor`` as argument,\nbut in such a way that an ``xt::pyxtensor`` can be accepted in the Python module.\nThis is done by having a templated function\n\n.. code-block:: cpp\n\n    template <class T>\n    void times_dimension(T& t);\n\nAs this might be a bit too permissive for your liking, we will show you how to limit the\nscope to *xtensor* types, and allow other overloads using the principle of SFINAE\n(Substitution Failure Is Not An Error).\nIn particular:\n\n:download:`mymodule.hpp <examples/sfinae/mymodule.hpp>`\n\n.. literalinclude:: examples/sfinae/mymodule.hpp\n   :language: cpp\n\nConsequently from C++, the interaction with the module's function is trivial\n\n:download:`main.cpp <examples/sfinae/main.cpp>`\n\n.. 
literalinclude:: examples/sfinae/main.cpp\n   :language: cpp\n\nFor the Python module we just have to specify the template to be\n``xt::pyarray`` or ``xt::pytensor``. E.g.\n\n:download:`src/python.cpp <examples/sfinae/python.cpp>`\n\n.. literalinclude:: examples/sfinae/python.cpp\n   :language: cpp\n\nWe will again use *CMake* to compile, with the following ``CMakeLists.txt``:\n\n:download:`CMakeLists.txt <examples/sfinae/CMakeLists.txt>`\n\n.. literalinclude:: examples/sfinae/CMakeLists.txt\n   :language: cmake\n\n(see *CMake* tip above).\n\nThen we can test the module:\n\n:download:`example.py <examples/sfinae/example.py>`\n\n.. literalinclude:: examples/sfinae/example.py\n   :language: python\n\n.. note::\n\n    Since we did not install the module,\n    we should compile and run the example from the same folder.\n    To install, please consult\n    `this pybind11 / CMake example <https://github.com/pybind/cmake_example>`_.\n    **Tip**: take care to modify that example with the correct *CMake* case ``Python_EXECUTABLE``.\n\nFall-back cast\n==============\n\nThe previous example showed you how to design your module to be flexible in accepting data.\nFrom C++ we used ``xt::xarray<double>``,\nwhereas for the Python API we used ``xt::pyarray<double>`` to operate directly on the memory\nof a NumPy array from Python (without copying the data).\n\nSometimes, you might not have the flexibility to design your module's methods\nwith template parameters.\nThis might occur when you want to ``override`` functions\n(though it is recommended to use CRTP to still use templates).\nIn this case we can still bind the module in Python using *xtensor-python*,\nhowever, we have to copy the data from a (NumPy) array.\nThis means that although the following signatures are quite different when used from C++,\nas follows:\n\n1.  *Constant reference*: read from the data, without copying it.\n\n    .. 
code-block:: cpp\n\n         void foo(const xt::xarray<double>& a);\n\n2.  *Reference*: read from and/or write to the data, without copying it.\n\n    .. code-block:: cpp\n\n         void foo(xt::xarray<double>& a);\n\n3.   *Copy*: copy the data.\n\n     .. code-block:: cpp\n\n         void foo(xt::xarray<double> a);\n\nIn Python, all cases will result in a copy to a temporary variable\n(though the last signature will lead to a copy to a temporary variable, and another copy to ``a``).\nOn the one hand, this is more costly than when using ``xt::pyarray`` and ``xt::pyxtensor``,\non the other hand, it means that all changes you make to a reference, are made to the temporary\ncopy, and are thus lost.\n\nStill, it might be a convenient way to create Python bindings, using a minimal effort.\nConsider this example:\n\n:download:`main.cpp <examples/copy_cast/main.cpp>`\n\n.. literalinclude:: examples/copy_cast/main.cpp\n   :language: cpp\n"
  },
  {
    "path": "docs/source/index.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\n.. image:: xtensor-python.svg\n\nPython bindings for the xtensor_ C++ multi-dimensional array library.\n\nIntroduction\n------------\n\nWhat are ``xtensor`` and ``xtensor-python``?\n\n - ``xtensor`` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing.\n - ``xtensor-python`` enables inplace use of numpy arrays with all the benefits from ``xtensor``\n\n     - C++ universal functions and broadcasting\n     - STL - compliant APIs.\n\n\nThe `numpy to xtensor cheat sheet`_ from the ``xtensor`` documentation shows how numpy APIs translate to C++ with ``xtensor``.\n\nThe Python bindings for ``xtensor`` are based on the pybind11_ C++ library, which enables seemless interoperability between C++ and Python.\n\nEnabling numpy arrays in your C++ libraries\n-------------------------------------------\n\nInstead of exposing new types to python, ``xtensor-python`` enables the use of NumPy_ data structures from C++ using Python's `Buffer Protocol`_.\n\nIn addition to the basic accessors and iterators of ``xtensor`` containers, it also enables using numpy arrays with ``xtensor``'s expression system.\n\nBesides ``xtensor-python`` provides an API to create *Universal functions* from simple scalar functions from your C++ code.\n\nFinally, a cookiecutter template project is provided. It takes care of the initial work of generating a project skeleton for a C++ extension based on ``xtensor-python`` containing a few examples, unit tests and HTML documentation. Find out more about the xtensor-python-cookiecutter_.\n\n``xtensor`` and ``xtensor-python`` require a modern C++ compiler supporting C++14. 
The following C++ compilers are supported:\n\n- On Windows platforms, Visual C++ 2015 Update 2, or more recent\n- On Unix platforms, gcc 4.9 or a recent version of Clang\n\nLicensing\n---------\n\nWe use a shared copyright model that enables all contributors to maintain the\ncopyright on their contributions.\n\nThis software is licensed under the BSD-3-Clause license. See the LICENSE file for details.\n\n.. toctree::\n   :caption: INSTALLATION\n   :maxdepth: 2\n\n   installation\n\n\n.. toctree::\n   :caption: USAGE\n   :maxdepth: 2\n\n   basic_usage\n   array_tensor\n   numpy_capi\n   examples\n   cookiecutter\n\n.. toctree::\n   :caption: API REFERENCE\n   :maxdepth: 2\n\n   api_reference\n\n.. toctree::\n   :caption: DEVELOPER ZONE\n\n   dev_build_options\n   compilers\n   releasing\n\n.. _NumPy: http://www.numpy.org\n.. _`Buffer Protocol`: https://docs.python.org/3/c-api/buffer.html\n.. _`numpy to xtensor cheat sheet`: http://xtensor.readthedocs.io/en/latest/numpy.html\n.. _xtensor: https://github.com/xtensor-stack/xtensor\n.. _pybind11: https://github.com/pybind/pybind11\n.. _xtensor-python-cookiecutter: https://github.com/xtensor-stack/xtensor-python-cookiecutter\n"
  },
  {
    "path": "docs/source/installation.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\n\n.. raw:: html\n\n   <style>\n   .rst-content .section>img {\n       width: 30px;\n       margin-bottom: 0;\n       margin-top: 0;\n       margin-right: 15px;\n       margin-left: 15px;\n       float: left;\n   }\n   </style>\n\nInstallation\n============\n\nAlthough ``xtensor-python`` is a header-only library, we provide standardized means to install it, with package managers or with cmake.\n\nBesides the xtendor-python headers, all these methods place the `cmake` project configuration file in the right location so that third-party projects can use cmake's find_package to locate xtensor-python headers.\n\n.. image:: conda.svg\n\nUsing the conda-forge package\n----------------------------\n\nA package for xtensor-python is available on the mamba (or conda) package manager.\n\n.. code::\n\n    mamba install -c conda-forge xtensor-python\n\n.. image:: debian.svg\n\nUsing the Debian package\n------------------------\n\nA package for xtensor-python is available on Debian.\n\n.. code::\n\n    sudo apt-get install xtensor-python-dev\n\n.. image:: cmake.svg\n\nFrom source with cmake\n----------------------\n\nYou can also install ``xtensor-python`` from source with cmake. On Unix platforms, from the source directory:\n\n.. code::\n\n    mkdir build\n    cd build\n    cmake -DCMAKE_INSTALL_PREFIX=/path/to/prefix ..\n    make install\n\nOn Windows platforms, from the source directory:\n\n.. code::\n\n    mkdir build\n    cd build\n    cmake -G \"NMake Makefiles\" -DCMAKE_INSTALL_PREFIX=/path/to/prefix ..\n    nmake\n    nmake install\n\nSee the section of the documentation on :doc:`build-options`, for more details on how to cmake options.\n"
  },
  {
    "path": "docs/source/numpy_capi.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nImporting numpy C API\n=====================\n\nImporting the C API module of numpy requires more code than just including a header. ``xtensor-python`` simplifies a lot\nthis import, however some actions are still required in the user code.\n\nExtension module with a single file\n-----------------------------------\n\nWhen writing an extension module that is self-contained in a single file, its author should pay attention to the following\npoints:\n\n- ``FORCE_IMPORT_ARRAY`` must be defined before including any header of ``xtensor-python``.\n- ``xt::import_numpy()`` must be called in the function initializing the module.\n\nThus the basic skeleton of the module looks like:\n\n.. code::\n\n    #define FORCE_IMPORT_ARRAY\n    #include \"xtensor-python/pyarray.hpp\"\n\n    PYBIND11_MODULE(plugin_name, m)\n    {\n        xt::import_numpy();\n        //...\n    }\n\n\nExtension module with multiple files\n------------------------------------\n\nIf the extension module contains many source files that include ``xtensor-python`` header files, the previous points are still\nrequired. However, the symbol ``FORCE_IMPORT_ARRAY`` must be defined only once. The simplest is to define it int the file that\ncontains the initializing code of the module, you can then directly include ``xtensor-python`` headers in other files. Let's\nillustrate this with an extension modules containing the following files:\n\n- ``main.cpp``: initializing code of the module\n- ``image.hpp``: declaration of the ``image`` class embedding an ``xt::pyarray`` object\n- ``image.cpp``: implementation of the ``image`` class\n\nThe basic skeleton of the module looks like:\n\n.. 
code::\n\n    // image.hpp\n    // Do NOT define FORCE_IMPORT_ARRAY here\n    #include \"xtensor-python/pyarray.hpp\"\n\n    class image\n    {\n    // ....\n    private:\n        xt::pyarray<double> m_data;\n    };\n\n    // image.cpp\n    // Do NOT define FORCE_IMPORT_ARRAY here\n    #include \"image.hpp\"\n    // definition of the image class\n\n    // main.cpp\n    // FORCE_IMPORT_ARRAY must be defined ONCE, BEFORE including\n    // any header from xtensor-python (even indirectly)\n    #define FORCE_IMPORT_ARRAY\n    #include \"image.hpp\"\n    PYBIND11_MODULE(plugin_name, m)\n    {\n        xt::import_numpy();\n        //...\n    }\n\n\nUsing other extension modules\n-----------------------------\n\nIncluding a header of ``xtensor-python`` actually defines ``PY_ARRAY_UNIQUE_SYMBOL`` to ``xtensor_python_ARRAY_API``. This might\nbe problematic if you import another library that defines its own ``PY_ARRAY_UNIQUE_SYMBOL``, or if you define yours. If so,\nyou can override the behavior of ``xtensor-python`` by explicitly defining ``PY_ARRAY_UNIQUE_SYMBOL`` prior to including any\n``xtensor-python`` header:\n\n.. code::\n\n    // in every source file\n    #define PY_ARRAY_UNIQUE_SYMBOL my_unique_array_api\n    #include \"xtensor-python/pyarray.hpp\"\n\n\n\n"
  },
  {
    "path": "docs/source/pyarray.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\npyarray\n=======\n\n.. doxygenclass:: xt::pyarray\n   :project: xtensor-python\n   :members:\n"
  },
  {
    "path": "docs/source/pytensor.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\npytensor\n========\n\n.. doxygenclass:: xt::pytensor\n   :project: xtensor-python\n   :members:\n"
  },
  {
    "path": "docs/source/pyvectorize.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\npyvectorize\n===========\n\n.. doxygenfunction:: xt::pyvectorize\n   :project: xtensor-python\n"
  },
  {
    "path": "docs/source/releasing.rst",
    "content": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n   Distributed under the terms of the BSD 3-Clause License.\n\n   The full license is in the file LICENSE, distributed with this software.\n\nReleasing xtensor-python\n========================\n\nReleasing a new version\n-----------------------\n\nFrom the master branch of xtensor-python\n\n- Make sure that you are in sync with the master branch of the upstream remote.\n- In file ``xtensor_python_config.hpp``, set the macros for ``XTENSOR_PYTHON_VERSION_MAJOR``, ``XTENSOR_PYTHON_VERSION_MINOR`` and ``XTENSOR_PYTHON_VERSION_PATCH`` to the desired values.\n- Update the readme file w.r.t. dependencies on xtensor and pybind11.\n- Stage the changes (``git add``), commit the changes (``git commit``) and add a tag of the form ``Major.minor.patch``. It is important to not add any other content to the tag name.\n- Push the new commit and tag to the main repository. (``git push``, and ``git push --tags``)\n\nUpdating the conda-forge recipe\n-------------------------------\n\nxtensor-python has been packaged for the conda package manager. Once the new tag has been pushed on GitHub, edit the conda-forge recipe for xtensor in the following fashion:\n\n- Update the version number to the new Major.minor.patch.\n- Set the build number to 0.\n- Update the hash of the source tarball.\n- Check for the versions of the dependencies.\n- Optionally, rerender the conda-forge feedstock.\n"
  },
  {
    "path": "environment-dev.yml",
    "content": "name: xtensor-python\nchannels:\n  - conda-forge\ndependencies:\n  # Build dependencies\n  - cmake\n  - ninja\n  # Host dependencies\n  - xtensor>=0.27,<0.28\n  - numpy>=2.0\n  - pybind11>=2.12.0,<4\n  # Test dependencies\n  - setuptools\n  - pytest\n\n"
  },
  {
    "path": "include/xtensor-python/pyarray.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PY_ARRAY_HPP\n#define PY_ARRAY_HPP\n\n#include <algorithm>\n#include <cstddef>\n#include <vector>\n\n#include \"xtensor/containers/xbuffer_adaptor.hpp\"\n#include \"xtensor/core/xiterator.hpp\"\n#include \"xtensor/core/xsemantic.hpp\"\n\n#include \"pyarray_backstrides.hpp\"\n#include \"pycontainer.hpp\"\n#include \"pystrides_adaptor.hpp\"\n#include \"pynative_casters.hpp\"\n#include \"xtensor_type_caster_base.hpp\"\n#include \"xtensor_python_config.hpp\"\n\nnamespace xt\n{\n    template <class T, layout_type L = layout_type::dynamic>\n    class pyarray;\n}\n\nnamespace pybind11\n{\n    namespace detail\n    {\n#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3\n        template <class T, xt::layout_type L>\n        struct handle_type_name<xt::pyarray<T, L>>\n        {\n            static PYBIND11_DESCR name()\n            {\n                return _(\"numpy.ndarray[\") + npy_format_descriptor<T>::name() + _(\"]\");\n            }\n        };\n#endif\n\n        template <typename T, xt::layout_type L>\n        struct pyobject_caster<xt::pyarray<T, L>>\n        {\n            using type = xt::pyarray<T, L>;\n\n            bool load(handle src, bool convert)\n            {\n                if (!convert)\n                {\n                    if (!xt::detail::check_array<T>(src))\n                    {\n        
                return false;\n                    }\n                }\n                value = type::ensure(src);\n                return static_cast<bool>(value);\n            }\n\n            static handle cast(const handle& src, return_value_policy, handle)\n            {\n                return src.inc_ref();\n            }\n\n#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3\n            PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name());\n#else\n            PYBIND11_TYPE_CASTER(type, _(\"numpy.ndarray[\") + npy_format_descriptor<T>::name + _(\"]\"));\n#endif\n        };\n\n        // Type caster for casting ndarray to xexpression<pyarray>\n        template <typename T, xt::layout_type L>\n        struct type_caster<xt::xexpression<xt::pyarray<T, L>>> : pyobject_caster<xt::pyarray<T, L>>\n        {\n            using Type = xt::xexpression<xt::pyarray<T, L>>;\n\n            operator Type&()\n            {\n                return this->value;\n            }\n\n            operator const Type&()\n            {\n                return this->value;\n            }\n        };\n\n    }\n}\n\nnamespace xt\n{\n    template <class T, layout_type L>\n    struct xiterable_inner_types<pyarray<T, L>>\n        : xcontainer_iterable_types<pyarray<T, L>>\n    {\n    };\n\n    template <class T, layout_type L>\n    struct xcontainer_inner_types<pyarray<T, L>>\n    {\n        using storage_type = xbuffer_adaptor<T*>;\n        using reference = typename storage_type::reference;\n        using const_reference = typename storage_type::const_reference;\n        using size_type = typename storage_type::size_type;\n        using shape_type = std::vector<typename storage_type::size_type>;\n        using strides_type = std::vector<typename storage_type::difference_type>;\n        using backstrides_type = pyarray_backstrides<pyarray<T, L>>;\n        using inner_shape_type = xbuffer_adaptor<std::size_t*>;\n        using inner_strides_type = 
pystrides_adaptor<sizeof(T)>;\n        using inner_backstrides_type = backstrides_type;\n        using temporary_type = pyarray<T, L>;\n        static constexpr layout_type layout = L;\n    };\n\n    /**\n     * @class pyarray\n     * @brief Multidimensional container providing the xtensor container semantics to a numpy array.\n     *\n     * pyarray is similar to the xarray container in that it has a dynamic dimensionality.\n     * Reshapes of a pyarray container are reflected in the underlying numpy array.\n     *\n     * @tparam T The type of the element stored in the pyarray.\n     * @tparam L Static layout of the pyarray\n     *\n     * @sa pytensor\n     */\n    template <class T, layout_type L>\n    class pyarray : public pycontainer<pyarray<T, L>>,\n                    public xcontainer_semantic<pyarray<T, L>>\n    {\n    public:\n\n        using self_type = pyarray<T, L>;\n        using semantic_base = xcontainer_semantic<self_type>;\n        using base_type = pycontainer<self_type>;\n        using storage_type = typename base_type::storage_type;\n        using value_type = typename base_type::value_type;\n        using reference = typename base_type::reference;\n        using const_reference = typename base_type::const_reference;\n        using pointer = typename base_type::pointer;\n        using size_type = typename base_type::size_type;\n        using difference_type = typename base_type::difference_type;\n        using shape_type = typename base_type::shape_type;\n        using strides_type = typename base_type::strides_type;\n        using backstrides_type = typename base_type::backstrides_type;\n        using inner_shape_type = typename base_type::inner_shape_type;\n        using inner_strides_type = typename base_type::inner_strides_type;\n        using inner_backstrides_type = typename base_type::inner_backstrides_type;\n        constexpr static std::size_t rank = SIZE_MAX;\n\n        pyarray();\n        pyarray(const value_type& t);\n        
pyarray(nested_initializer_list_t<T, 1> t);\n        pyarray(nested_initializer_list_t<T, 2> t);\n        pyarray(nested_initializer_list_t<T, 3> t);\n        pyarray(nested_initializer_list_t<T, 4> t);\n        pyarray(nested_initializer_list_t<T, 5> t);\n\n        pyarray(pybind11::handle h, pybind11::object::borrowed_t);\n        pyarray(pybind11::handle h, pybind11::object::stolen_t);\n        pyarray(const pybind11::object& o);\n\n        explicit pyarray(const shape_type& shape, layout_type l = layout_type::row_major);\n        explicit pyarray(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);\n        explicit pyarray(const shape_type& shape, const strides_type& strides, const_reference value);\n        explicit pyarray(const shape_type& shape, const strides_type& strides);\n\n        template <class S = shape_type>\n        static pyarray from_shape(S&& s);\n\n        pyarray(const self_type& rhs);\n        self_type& operator=(const self_type& rhs);\n\n        pyarray(self_type&&) = default;\n        self_type& operator=(self_type&& e) = default;\n\n        template <class E>\n        pyarray(const xexpression<E>& e);\n\n        template <class E>\n        self_type& operator=(const xexpression<E>& e);\n\n        using base_type::begin;\n        using base_type::end;\n\n        static self_type ensure(pybind11::handle h);\n        static bool check_(pybind11::handle h);\n\n#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || PYBIND11_VERSION_MAJOR >= 3\n        // Prevent ambiguous overload resolution for operators defined for\n        // both xt::xcontainer_semantic and pybind11::object.\n        using semantic_base::operator+=;\n        using semantic_base::operator-=;\n        using semantic_base::operator*=;\n        using semantic_base::operator/=;\n        using semantic_base::operator|=;\n        using semantic_base::operator&=;\n        using semantic_base::operator^=;\n        // using 
semantic_base::operator<<=;\n        // using semantic_base::operator>>=;\n#endif\n\n    private:\n\n        inner_shape_type m_shape;\n        inner_strides_type m_strides;\n        mutable inner_backstrides_type m_backstrides;\n        storage_type m_storage;\n\n        void init_array(const shape_type& shape, const strides_type& strides);\n        void init_from_python();\n\n        const inner_shape_type& shape_impl() const noexcept;\n        const inner_strides_type& strides_impl() const noexcept;\n        const inner_backstrides_type& backstrides_impl() const noexcept;\n\n        storage_type& storage_impl() noexcept;\n        const storage_type& storage_impl() const noexcept;\n\n        layout_type default_dynamic_layout();\n\n        friend class xcontainer<pyarray<T, L>>;\n        friend class pycontainer<pyarray<T, L>>;\n    };\n\n    /**************************\n     * pyarray implementation *\n     **************************/\n\n    /**\n     * @name Constructors\n     */\n    //@{\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray()\n        : base_type()\n    {\n        // TODO: avoid allocation\n        shape_type shape = xtl::make_sequence<shape_type>(0, size_type(1));\n        strides_type strides = xtl::make_sequence<strides_type>(0, size_type(0));\n        init_array(shape, strides);\n        detail::default_initialize(m_storage);\n    }\n\n    /**\n     * Allocates a pyarray with nested initializer lists.\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const value_type& t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        nested_copy(m_storage.begin(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 1> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        L == layout_type::row_major ? 
nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 2> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 3> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 4> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 5> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());\n        L == layout_type::row_major ? 
nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(pybind11::handle h, pybind11::object::borrowed_t b)\n        : base_type(h, b)\n    {\n        init_from_python();\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(pybind11::handle h, pybind11::object::stolen_t s)\n        : base_type(h, s)\n    {\n        init_from_python();\n    }\n\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const pybind11::object& o)\n        : base_type(o)\n    {\n        init_from_python();\n    }\n\n    /**\n     * Allocates an uninitialized pyarray with the specified shape and\n     * layout.\n     * @param shape the shape of the pyarray\n     * @param l the layout of the pyarray\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const shape_type& shape, layout_type l)\n        : base_type()\n    {\n        strides_type strides(shape.size());\n        compute_strides(shape, l, strides);\n        init_array(shape, strides);\n    }\n\n    /**\n     * Allocates a pyarray with the specified shape and layout. 
Elements\n     * are initialized to the specified value.\n     * @param shape the shape of the pyarray\n     * @param value the value of the elements\n     * @param l the layout of the pyarray\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const shape_type& shape, const_reference value, layout_type l)\n        : base_type()\n    {\n        strides_type strides(shape.size());\n        compute_strides(shape, l, strides);\n        init_array(shape, strides);\n        std::fill(m_storage.begin(), m_storage.end(), value);\n    }\n\n    /**\n     * Allocates an uninitialized pyarray with the specified shape and strides.\n     * Elements are initialized to the specified value.\n     * @param shape the shape of the pyarray\n     * @param strides the strides of the pyarray\n     * @param value the value of the elements\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const shape_type& shape, const strides_type& strides, const_reference value)\n        : base_type()\n    {\n        init_array(shape, strides);\n        std::fill(m_storage.begin(), m_storage.end(), value);\n    }\n\n    /**\n     * Allocates an uninitialized pyarray with the specified shape and strides.\n     * @param shape the shape of the pyarray\n     * @param strides the strides of the pyarray\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const shape_type& shape, const strides_type& strides)\n        : base_type()\n    {\n        init_array(shape, strides);\n    }\n\n    /**\n     * Allocates and returns an pyarray with the specified shape.\n     * @param shape the shape of the pyarray\n     */\n    template <class T, layout_type L>\n    template <class S>\n    inline pyarray<T, L> pyarray<T, L>::from_shape(S&& shape)\n    {\n        auto shp = xtl::forward_sequence<shape_type, S>(shape);\n        return self_type(shp);\n    }\n    //@}\n\n    /**\n     * @name Copy semantic\n     */\n    //@{\n    
/**\n     * The copy constructor.\n     */\n    template <class T, layout_type L>\n    inline pyarray<T, L>::pyarray(const self_type& rhs)\n        : base_type(), semantic_base(rhs)\n    {\n        auto tmp = pybind11::reinterpret_steal<pybind11::object>(\n            PyArray_NewLikeArray(rhs.python_array(), NPY_KEEPORDER, nullptr, 1));\n\n        if (!tmp)\n        {\n            throw std::runtime_error(\"NumPy: unable to create ndarray\");\n        }\n\n        this->m_ptr = tmp.release().ptr();\n        init_from_python();\n        std::copy(rhs.storage().cbegin(), rhs.storage().cend(), this->storage().begin());\n    }\n\n    /**\n     * The assignment operator.\n     */\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::operator=(const self_type& rhs) -> self_type&\n    {\n        self_type tmp(rhs);\n        *this = std::move(tmp);\n        return *this;\n    }\n\n    //@}\n\n    /**\n     * @name Extended copy semantic\n     */\n    //@{\n    /**\n     * The extended copy constructor.\n     */\n    template <class T, layout_type L>\n    template <class E>\n    inline pyarray<T, L>::pyarray(const xexpression<E>& e)\n        : base_type()\n    {\n        // TODO: prevent intermediary shape allocation\n        shape_type shape = xtl::forward_sequence<shape_type, decltype(e.derived_cast().shape())>(e.derived_cast().shape());\n        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), size_type(0));\n        layout_type layout = default_dynamic_layout();\n\n        compute_strides(shape, layout, strides);\n        init_array(shape, strides);\n        semantic_base::assign(e);\n    }\n\n    /**\n     * The extended assignment operator.\n     */\n    template <class T, layout_type L>\n    template <class E>\n    inline auto pyarray<T, L>::operator=(const xexpression<E>& e) -> self_type&\n    {\n        return semantic_base::operator=(e);\n    }\n    //@}\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, 
L>::ensure(pybind11::handle h) -> self_type\n    {\n        return base_type::ensure(h);\n    }\n\n    template <class T, layout_type L>\n    inline bool pyarray<T, L>::check_(pybind11::handle h)\n    {\n        return base_type::check_(h);\n    }\n\n    template <class T, layout_type L>\n    inline void pyarray<T, L>::init_array(const shape_type& shape, const strides_type& strides)\n    {\n        strides_type adapted_strides(strides);\n\n        std::transform(strides.begin(), strides.end(), adapted_strides.begin(),\n                       [](auto v) { return sizeof(T) * v; });\n\n        int flags = NPY_ARRAY_ALIGNED;\n        if (!std::is_const<T>::value)\n        {\n            flags |= NPY_ARRAY_WRITEABLE;\n        }\n\n        auto dtype = pybind11::detail::npy_format_descriptor<T>::dtype();\n\n        npy_intp* shape_data = reinterpret_cast<npy_intp*>(const_cast<size_type*>(shape.data()));\n        npy_intp* strides_data = reinterpret_cast<npy_intp*>(adapted_strides.data());\n\n        auto tmp = pybind11::reinterpret_steal<pybind11::object>(\n            PyArray_NewFromDescr(&PyArray_Type, (PyArray_Descr*) dtype.release().ptr(), static_cast<int>(shape.size()), shape_data, strides_data,\n                        nullptr, flags, nullptr));\n\n        if (!tmp)\n        {\n            throw std::runtime_error(\"NumPy: unable to create ndarray\");\n        }\n\n        this->m_ptr = tmp.release().ptr();\n        init_from_python();\n    }\n\n    template <class T, layout_type L>\n    inline void pyarray<T, L>::init_from_python()\n    {\n        if (!static_cast<bool>(*this))\n        {\n            return;\n        }\n\n        m_shape = inner_shape_type(reinterpret_cast<size_type*>(PyArray_SHAPE(this->python_array())),\n                                   static_cast<size_type>(PyArray_NDIM(this->python_array())));\n        m_strides = inner_strides_type(reinterpret_cast<difference_type*>(PyArray_STRIDES(this->python_array())),\n                                 
      static_cast<size_type>(PyArray_NDIM(this->python_array())),\n                                       reinterpret_cast<size_type*>(PyArray_SHAPE(this->python_array())));\n\n        if (L != layout_type::dynamic && !do_strides_match(m_shape, m_strides, L, 1))\n        {\n            throw std::runtime_error(\"NumPy: passing container with bad strides for layout (is it a view?).\");\n        }\n\n        m_backstrides = backstrides_type(*this);\n        m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),\n                                 this->get_buffer_size());\n    }\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::shape_impl() const noexcept -> const inner_shape_type&\n    {\n        return m_shape;\n    }\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::strides_impl() const noexcept -> const inner_strides_type&\n    {\n        return m_strides;\n    }\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::backstrides_impl() const noexcept -> const inner_backstrides_type&\n    {\n        // m_backstrides wraps the numpy array backstrides, which is a raw pointer.\n        // The address of the raw pointer stored in the wrapper would be invalidated when the pyarray is copied.\n        // Hence, we build a new backstrides object (cheap wrapper around the underlying pointer) upon access.\n        m_backstrides = backstrides_type(*this);\n        return m_backstrides;\n    }\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::storage_impl() noexcept -> storage_type&\n    {\n        return m_storage;\n    }\n\n    template <class T, layout_type L>\n    inline auto pyarray<T, L>::storage_impl() const noexcept -> const storage_type&\n    {\n        return m_storage;\n    }\n\n    template <class T, layout_type L>\n    layout_type pyarray<T, L>::default_dynamic_layout()\n    {\n        return L == layout_type::dynamic ? 
layout_type::row_major : L;\n    }\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pyarray_backstrides.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PY_ARRAY_BACKSTRIDES_HPP\n#define PY_ARRAY_BACKSTRIDES_HPP\n\n#include <cstddef>\n#include <iterator>\n\nnamespace xt\n{\n\n    /**************************\n     * pybackstrides_iterator *\n     **************************/\n\n    template <class B>\n    class pybackstrides_iterator\n    {\n    public:\n\n        using self_type = pybackstrides_iterator<B>;\n\n        using value_type = typename B::value_type;\n        using pointer = const value_type*;\n        using reference = value_type;\n        using difference_type = std::ptrdiff_t;\n        using iterator_category = std::random_access_iterator_tag;\n\n        pybackstrides_iterator(const B* b, std::size_t offset);\n\n        reference operator*() const;\n        pointer operator->() const;\n\n        reference operator[](difference_type n) const;\n\n        self_type& operator++();\n        self_type& operator--();\n\n        self_type operator++(int);\n        self_type operator--(int);\n\n        self_type& operator+=(difference_type n);\n        self_type& operator-=(difference_type n);\n\n        self_type operator+(difference_type n) const;\n        self_type operator-(difference_type n) const;\n        self_type operator-(const self_type& rhs) const;\n\n        std::size_t offset() const;\n\n    private:\n\n        const B* p_b;\n        std::size_t m_offset;\n    
};\n\n    template <class B>\n    inline bool operator==(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs);\n\n    template <class B>\n    inline bool operator!=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs);\n\n    template <class B>\n    inline bool operator<(const pybackstrides_iterator<B>& lhs,\n                          const pybackstrides_iterator<B>& rhs);\n\n    template <class B>\n    inline bool operator<=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs);\n\n    template <class B>\n    inline bool operator>(const pybackstrides_iterator<B>& lhs,\n                          const pybackstrides_iterator<B>& rhs);\n\n    template <class B>\n    inline bool operator>=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs);\n\n    /***********************\n     * pyarray_backstrides *\n     ***********************/\n\n    template <class A>\n    class pyarray_backstrides\n    {\n    public:\n\n        using self_type = pyarray_backstrides<A>;\n        using array_type = A;\n        using value_type = typename array_type::size_type;\n        using const_reference = value_type;\n        using reference = const_reference;\n        using const_pointer = const value_type*;\n        using pointer = const_pointer;\n        using size_type = typename array_type::size_type;\n        using difference_type = typename array_type::difference_type;\n\n        using const_iterator = pybackstrides_iterator<self_type>;\n        using iterator = const_iterator;\n        using reverse_iterator = std::reverse_iterator<iterator>;\n        using const_reverse_iterator = std::reverse_iterator<const_iterator>;\n\n        pyarray_backstrides() = default;\n        pyarray_backstrides(const array_type& a);\n\n        bool empty() const;\n        size_type size() const;\n\n     
   value_type operator[](size_type i) const;\n\n        const_reference front() const;\n        const_reference back() const;\n\n        const_iterator begin() const;\n        const_iterator end() const;\n        const_iterator cbegin() const;\n        const_iterator cend() const;\n\n        const_reverse_iterator rbegin() const;\n        const_reverse_iterator rend() const;\n        const_reverse_iterator crbegin() const;\n        const_reverse_iterator crend() const;\n\n    private:\n\n        const array_type* p_a;\n    };\n\n    /*****************************************\n     * pybackstrides_iterator implementation *\n     *****************************************/\n    \n    template <class B>\n    inline pybackstrides_iterator<B>::pybackstrides_iterator(const B* b, std::size_t offset)\n        : p_b(b), m_offset(offset)\n    {\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator*() const -> reference\n    {\n        return p_b->operator[](m_offset);\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator->() const -> pointer\n    {\n        // Returning the address of a temporary\n        value_type res = p_b->operator[](m_offset);\n        return &res;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator[](difference_type n) const -> reference\n    {\n        return p_b->operator[](m_offset + n);\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator++() -> self_type&\n    {\n        ++m_offset;\n        return *this;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator--() -> self_type&\n    {\n        --m_offset;\n            return *this;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator++(int )-> self_type\n    {\n        self_type tmp(*this);\n        ++m_offset;\n        return tmp;\n    }\n\n    template <class B>\n    inline auto 
pybackstrides_iterator<B>::operator--(int) -> self_type\n    {\n        self_type tmp(*this);\n        --m_offset;\n        return tmp;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator+=(difference_type n) -> self_type&\n    {\n        m_offset += n;\n        return *this;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator-=(difference_type n) -> self_type&\n    {\n        m_offset -= n;\n        return *this;\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator+(difference_type n) const -> self_type\n    {\n        return self_type(p_b, m_offset + n);\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator-(difference_type n) const -> self_type\n    {\n        return self_type(p_b, m_offset - n);\n    }\n\n    template <class B>\n    inline auto pybackstrides_iterator<B>::operator-(const self_type& rhs) const -> self_type\n    {\n        self_type tmp(*this);\n        tmp -= (m_offset - rhs.m_offset);\n        return tmp;\n    }\n\n    template <class B>\n    inline std::size_t pybackstrides_iterator<B>::offset() const\n    {\n        return m_offset;\n    }\n\n    template <class B>\n    inline bool operator==(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs)\n    {\n        return lhs.offset() == rhs.offset();\n    }\n\n    template <class B>\n    inline bool operator!=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs)\n    {\n        return !(lhs == rhs);\n    }\n\n    template <class B>\n    inline bool operator<(const pybackstrides_iterator<B>& lhs,\n                          const pybackstrides_iterator<B>& rhs)\n    {\n        return lhs.offset() < rhs.offset();\n    }\n\n    template <class B>\n    inline bool operator<=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& 
rhs)\n    {\n        return (lhs < rhs) || (lhs == rhs);\n    }\n\n    template <class B>\n    inline bool operator>(const pybackstrides_iterator<B>& lhs,\n                          const pybackstrides_iterator<B>& rhs)\n    {\n        return !(lhs <= rhs);\n    }\n\n    template <class B>\n    inline bool operator>=(const pybackstrides_iterator<B>& lhs,\n                           const pybackstrides_iterator<B>& rhs)\n    {\n        return !(lhs < rhs);\n    }\n\n    /**************************************\n     * pyarray_backstrides implementation *\n     **************************************/\n\n    template <class A>\n    inline pyarray_backstrides<A>::pyarray_backstrides(const array_type& a)\n        : p_a(&a)\n    {\n    }\n\n    template <class A>\n    inline bool pyarray_backstrides<A>::empty() const\n    {\n        return p_a->dimension() == 0;\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::size() const -> size_type\n    {\n        return p_a->dimension();\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::operator[](size_type i) const -> value_type\n    {\n        value_type sh = p_a->shape()[i];\n        value_type res = sh == 1 ? 0 : (sh - 1) * p_a->strides()[i];\n        return res;\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::front() const -> const_reference\n    {\n        value_type sh = p_a->shape()[0];\n        value_type res = sh == 1 ? 0 : (sh - 1) * p_a->strides()[0];\n        return res;\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::back() const -> const_reference\n    {\n        auto index = p_a->size() - 1;\n        value_type sh = p_a->shape()[index];\n        value_type res = sh == 1 ? 
0 : (sh - 1) * p_a->strides()[index];\n        return res;\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::begin() const -> const_iterator\n    {\n        return cbegin();\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::end() const -> const_iterator\n    {\n        return cend();\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::cbegin() const -> const_iterator\n    {\n        return const_iterator(this, 0);\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::cend() const -> const_iterator\n    {\n        return const_iterator(this, size());\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::rbegin() const -> const_reverse_iterator\n    {\n        return crbegin();\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::rend() const -> const_reverse_iterator\n    {\n        return crend();\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::crbegin() const -> const_reverse_iterator\n    {\n        return const_reverse_iterator(end());\n    }\n\n    template <class A>\n    inline auto pyarray_backstrides<A>::crend() const -> const_reverse_iterator\n    {\n        return const_reverse_iterator(begin());\n    }\n\n\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pycontainer.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PY_CONTAINER_HPP\n#define PY_CONTAINER_HPP\n\n#include <cmath>\n#include <functional>\n#include <numeric>\n#include <sstream>\n\n#include \"pybind11/complex.h\"\n#include \"pybind11/pybind11.h\"\n#include \"pybind11/numpy.h\"\n\n#ifndef FORCE_IMPORT_ARRAY\n#define NO_IMPORT_ARRAY\n#endif\n#ifndef PY_ARRAY_UNIQUE_SYMBOL\n#define PY_ARRAY_UNIQUE_SYMBOL xtensor_python_ARRAY_API\n#endif\n#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n\n#include \"numpy/arrayobject.h\"\n// Required because pyconfig.hpp defines copysign to _copysign\n#undef copysign\n\n#include <cmath>\n#include \"xtensor/containers/xcontainer.hpp\"\n\n#include \"xtl/xsequence.hpp\"\n\nnamespace xt\n{\n\n    inline void import_numpy();\n\n    /**\n     * @class pycontainer\n     * @brief Base class for xtensor containers wrapping numpy arryays.\n     *\n     * The pycontainer class should not be instantiated directly. Instead, used should\n     * use pytensor and pyarray instancs.\n     *\n     * @tparam D The derived type, i.e. 
the inheriting class for which pycontainer\n     *           provides the interface.\n     */\n    template <class D>\n    class pycontainer : public pybind11::object,\n                        public xcontainer<D>\n    {\n    public:\n\n        using derived_type = D;\n\n        using base_type = xcontainer<D>;\n        using inner_types = xcontainer_inner_types<D>;\n        using storage_type = typename inner_types::storage_type;\n        using value_type = typename storage_type::value_type;\n        using reference = typename storage_type::reference;\n        using const_reference = typename storage_type::const_reference;\n        using pointer = typename storage_type::pointer;\n        using const_pointer = typename storage_type::const_pointer;\n        using size_type = typename storage_type::size_type;\n        using difference_type = typename storage_type::difference_type;\n\n        using shape_type = typename inner_types::shape_type;\n        using strides_type = typename inner_types::strides_type;\n        using backstrides_type = typename inner_types::backstrides_type;\n        using inner_shape_type = typename inner_types::inner_shape_type;\n        using inner_strides_type = typename inner_types::inner_strides_type;\n\n        using iterable_base = xcontainer<D>;\n\n        using iterator = typename iterable_base::iterator;\n        using const_iterator = typename iterable_base::const_iterator;\n\n        using stepper = typename iterable_base::stepper;\n        using const_stepper = typename iterable_base::const_stepper;\n\n        template <class S = shape_type>\n        void resize(const S& shape);\n        template <class S = shape_type>\n        void resize(const S& shape, layout_type l);\n        template <class S = shape_type>\n        void resize(const S& shape, const strides_type& strides);\n\n        template <class S = shape_type>\n        auto& reshape(S&& shape, layout_type layout = base_type::static_layout) &;\n\n        layout_type 
layout() const;\n        bool is_contiguous() const noexcept;\n\n        using base_type::operator();\n        using base_type::operator[];\n        using base_type::begin;\n        using base_type::end;\n\n    protected:\n\n        pycontainer();\n        ~pycontainer() = default;\n\n        pycontainer(pybind11::handle h, borrowed_t);\n        pycontainer(pybind11::handle h, stolen_t);\n        pycontainer(const pybind11::object& o);\n\n        pycontainer(const pycontainer&) = default;\n        pycontainer& operator=(const pycontainer&) = default;\n\n        pycontainer(pycontainer&&) = default;\n        pycontainer& operator=(pycontainer&&) = default;\n\n        static derived_type ensure(pybind11::handle h);\n        static bool check_(pybind11::handle h);\n        static PyObject* raw_array_t(PyObject* ptr);\n\n        derived_type& derived_cast();\n        const derived_type& derived_cast() const;\n\n        PyArrayObject* python_array() const;\n        size_type get_buffer_size() const;\n\n    private:\n\n#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || PYBIND11_VERSION_MAJOR >= 3\n        // Prevent ambiguous overload resolution for operators defined for\n        // both xt::xcontainer and pybind11::object.\n        using pybind11::object::operator~;\n        using pybind11::object::operator+;\n        using pybind11::object::operator-;\n        using pybind11::object::operator*;\n        using pybind11::object::operator/;\n        using pybind11::object::operator|;\n        using pybind11::object::operator&;\n        using pybind11::object::operator^;\n        using pybind11::object::operator<<;\n        using pybind11::object::operator>>;\n#endif\n    };\n\n    namespace detail\n    {\n        template <class T, class E = void>\n        struct numpy_traits;\n\n        template <class T>\n        struct numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value>>\n        {\n       
 private:\n\n            // On Windows 64 bits, NPY_INT != NPY_INT32 and NPY_UINT != NPY_UINT32\n            // We use the NPY_INT32 and NPY_UINT32 which are consistent with the values\n            // of NPY_LONG and NPY_ULONG\n            // On Linux x64, NPY_INT64 != NPY_LONGLONG and NPY_UINT64 != NPY_ULONGLONG,\n            // we use the values of NPY_INT64 and NPY_UINT64 which are consistent with the\n            // values of NPY_LONG and NPY_ULONG.\n            constexpr static const int value_list[15] = {\n                NPY_BOOL,\n                NPY_BYTE, NPY_UBYTE, NPY_SHORT, NPY_USHORT,\n                NPY_INT32, NPY_UINT32, NPY_INT64, NPY_UINT64,\n                NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,\n                NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE};\n\n        public:\n\n            using value_type = std::remove_const_t<T>;\n\n            static constexpr int type_num = value_list[pybind11::detail::is_fmt_numeric<value_type>::index];\n        };\n\n        // On Linux x64, NPY_INT64 != NPY_LONGLONG and NPY_UINT64 != NPY_ULONGLONG\n        // NPY_LONGLONG and NPY_ULONGLONG must be adjusted so the right type is\n        // selected\n        template <bool>\n        struct numpy_enum_adjuster\n        {\n            static inline int pyarray_type(PyArrayObject* obj)\n            {\n                return PyArray_TYPE(obj);\n            }\n        };\n\n        template <>\n        struct numpy_enum_adjuster<true>\n        {\n            static inline int pyarray_type(PyArrayObject* obj)\n            {\n                int res = PyArray_TYPE(obj);\n                if(res == NPY_LONGLONG || res == NPY_ULONGLONG)\n                {\n                    res -= 2;\n                }\n                return res;\n            }\n        };\n\n        inline int pyarray_type(PyArrayObject* obj)\n        {\n            return numpy_enum_adjuster<NPY_LONGLONG != NPY_INT64>::pyarray_type(obj);\n        }\n\n        template <class T>\n        void 
default_initialize_impl(T& /*storage*/, std::false_type)\n        {\n        }\n\n        template <class T>\n        void default_initialize_impl(T& storage, std::true_type)\n        {\n            using value_type = typename T::value_type;\n            storage[0] = value_type{};\n        }\n\n        template <class T>\n        void default_initialize(T& storage)\n        {\n            using value_type = typename T::value_type;\n            default_initialize_impl(storage, std::is_copy_assignable<value_type>());\n        }\n\n        template <class T>\n        bool check_array_type(const pybind11::handle& src, std::true_type)\n        {\n            int type_num = xt::detail::numpy_traits<T>::type_num;\n            return xt::detail::pyarray_type(reinterpret_cast<PyArrayObject*>(src.ptr())) == type_num;\n        }\n\n        template <class T>\n        bool check_array_type(const pybind11::handle& src, std::false_type)\n        {\n            return PyArray_EquivTypes((PyArray_Descr*) pybind11::detail::array_proxy(src.ptr())->descr,\n                                      (PyArray_Descr*) pybind11::dtype::of<T>().ptr());\n        }\n\n        template <class T>\n        bool check_array(const pybind11::handle& src)\n        {\n            using is_arithmetic_type = std::integral_constant<bool, bool(pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value)>;\n            return PyArray_Check(src.ptr()) && check_array_type<T>(src, is_arithmetic_type{});\n        }\n    }\n\n    /******************************\n     * pycontainer implementation *\n     ******************************/\n\n    template <class D>\n    inline pycontainer<D>::pycontainer()\n        : pybind11::object()\n    {\n    }\n\n    template <class D>\n    inline pycontainer<D>::pycontainer(pybind11::handle h, borrowed_t b)\n        : pybind11::object(h, b)\n    {\n    }\n\n    template <class D>\n    inline pycontainer<D>::pycontainer(pybind11::handle h, stolen_t s)\n     
   : pybind11::object(h, s)\n    {\n    }\n\n    template <class D>\n    inline pycontainer<D>::pycontainer(const pybind11::object& o)\n        : pybind11::object(raw_array_t(o.ptr()), pybind11::object::stolen_t{})\n    {\n        if (!this->m_ptr)\n        {\n            throw pybind11::error_already_set();\n        }\n    }\n\n    template <class D>\n    inline auto pycontainer<D>::ensure(pybind11::handle h) -> derived_type\n    {\n        auto result = pybind11::reinterpret_steal<derived_type>(raw_array_t(h.ptr()));\n        if (result.ptr() == nullptr)\n        {\n            PyErr_Clear();\n        }\n        return result;\n    }\n\n    template <class D>\n    inline bool pycontainer<D>::check_(pybind11::handle h)\n    {\n        return detail::check_array<typename D::value_type>(h);\n    }\n\n    template <class D>\n    inline PyObject* pycontainer<D>::raw_array_t(PyObject* ptr)\n    {\n        if (ptr == nullptr)\n        {\n            return nullptr;\n        }\n\n        auto dtype = pybind11::detail::npy_format_descriptor<value_type>::dtype();\n        auto res = PyArray_FromAny(ptr, (PyArray_Descr *) dtype.release().ptr(), 0, 0,\n                                   NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST, nullptr);\n        return res;\n    }\n\n    template <class D>\n    inline PyArrayObject* pycontainer<D>::python_array() const\n    {\n        return reinterpret_cast<PyArrayObject*>(this->m_ptr);\n    }\n\n    template <class D>\n    inline auto pycontainer<D>::get_buffer_size() const -> size_type\n    {\n        const size_type& (*min)(const size_type&, const size_type&) = std::min<size_type>;\n        size_type min_stride = this->strides().empty() ? 
size_type(1) :\n            std::max(size_type(1), std::accumulate(this->strides().cbegin(),\n                                                   this->strides().cend(),\n                                                   std::numeric_limits<size_type>::max(),\n                                                   min));\n        return min_stride * static_cast<size_type>(PyArray_SIZE(this->python_array()));\n    }\n\n    template <class D>\n    inline auto pycontainer<D>::derived_cast() -> derived_type&\n    {\n        return *static_cast<derived_type*>(this);\n    }\n\n    template <class D>\n    inline auto pycontainer<D>::derived_cast() const -> const derived_type&\n    {\n        return *static_cast<const derived_type*>(this);\n    }\n\n    namespace detail\n    {\n        template <class S>\n        struct check_dims\n        {\n            static bool run(std::size_t)\n            {\n                return true;\n            }\n        };\n\n        template <class T, std::size_t N>\n        struct check_dims<std::array<T, N>>\n        {\n            static bool run(std::size_t new_dim)\n            {\n                if(new_dim != N)\n                {\n                    std::ostringstream err_msg;\n                    err_msg << \"Invalid conversion to pycontainer, expecting a container of dimension \"\n                            << N << \", got a container of dimension \" << new_dim << \".\";\n                    throw std::runtime_error(err_msg.str());\n                }\n                return new_dim == N;\n            }\n        };\n    }\n\n    /**\n     * resizes the container.\n     * @param shape the new shape\n     */\n    template <class D>\n    template <class S>\n    inline void pycontainer<D>::resize(const S& shape)\n    {\n        if (shape.size() != this->dimension() || !std::equal(std::begin(shape), std::end(shape), std::begin(this->shape())))\n        {\n            resize(shape, layout_type::row_major);\n        }\n    }\n\n    /**\n     
* resizes the container.\n     * @param shape the new shape\n     * @param l the new layout\n     */\n    template <class D>\n    template <class S>\n    inline void pycontainer<D>::resize(const S& shape, layout_type l)\n    {\n        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), size_type(1));\n        compute_strides(shape, l, strides);\n        resize(shape, strides);\n    }\n\n    /**\n     * resizes the container.\n     * @param shape the new shape\n     * @param strides the new strides\n     */\n    template <class D>\n    template <class S>\n    inline void pycontainer<D>::resize(const S& shape, const strides_type& strides)\n    {\n        detail::check_dims<shape_type>::run(shape.size());\n        derived_type tmp(xtl::forward_sequence<shape_type, decltype(shape)>(shape), strides);\n        *static_cast<derived_type*>(this) = std::move(tmp);\n    }\n\n    template <class D>\n    template <class S>\n    inline auto& pycontainer<D>::reshape(S&& shape, layout_type layout) &\n    {\n        if (compute_size(shape) != this->size())\n        {\n            throw std::runtime_error(\"Cannot reshape with incorrect number of elements (\" + std::to_string(this->size()) + \" vs \" + std::to_string(compute_size(shape)) + \")\");\n        }\n        detail::check_dims<shape_type>::run(shape.size());\n        layout = default_assignable_layout(layout);\n\n        NPY_ORDER npy_layout;\n        if (layout == layout_type::row_major)\n        {\n            npy_layout = NPY_CORDER;\n        }\n        else if (layout == layout_type::column_major)\n        {\n            npy_layout = NPY_FORTRANORDER;\n        }\n        else\n        {\n            throw std::runtime_error(\"Cannot reshape with unknown layout_type.\");\n        }\n\n        using shape_ptr = typename std::decay_t<S>::pointer;\n        PyArray_Dims dims = {reinterpret_cast<npy_intp*>(const_cast<shape_ptr>(shape.data())), static_cast<int>(shape.size())};\n        auto new_ptr = 
PyArray_Newshape((PyArrayObject*) this->ptr(), &dims, npy_layout);\n        auto old_ptr = this->ptr();\n        this->ptr() = new_ptr;\n        Py_XDECREF(old_ptr);\n        this->derived_cast().init_from_python();\n        return *this;\n    }\n\n    /**\n     * Return the layout_type of the container\n     * @return layout_type of the container\n     */\n    template <class D>\n    inline layout_type pycontainer<D>::layout() const\n    {\n        if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS))\n        {\n            return layout_type::row_major;\n        }\n        else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS))\n        {\n            return layout_type::column_major;\n        }\n        else\n        {\n            return layout_type::dynamic;\n        }\n    }\n\n    /**\n     * Return whether or not the container uses contiguous buffer\n     * @return Boolean for contiguous buffer\n     */\n    template <class D>\n    inline bool pycontainer<D>::is_contiguous() const noexcept\n    {\n        if (this->strides().size() == 0)\n        {\n            return true;\n        }\n        else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS))\n        {\n            return 1 == this->strides().back();\n        }\n        else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS))\n        {\n            return 1 == this->strides().front();\n        }\n        else\n        {\n            return false;\n        }\n    }\n\n    /**\n     * Import the numpy Python module.\n     */\n    inline void import_numpy()\n    {\n#ifdef FORCE_IMPORT_ARRAY\n        if (_import_array() < 0)\n        {\n            PyErr_Print();\n            PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\");\n        }\n#endif\n    }\n\n#if defined(__GNUC__) && !defined(__clang__)\n    namespace workaround\n    {\n        // Fixes \"undefined symbol\" issues\n        inline void long_long_allocator()\n        {\n     
       std::allocator<long long> a;\n            std::allocator<unsigned long long> b;\n            std::allocator<double> c;\n            std::allocator<std::complex<double>> d;\n        }\n    }\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pynative_casters.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PYNATIVE_CASTERS_HPP\n#define PYNATIVE_CASTERS_HPP\n\n#include \"xtensor_type_caster_base.hpp\"\n\nnamespace pybind11\n{\n    namespace detail\n    {\n        // Type caster for casting xarray to ndarray\n        template <class T, xt::layout_type L>\n        struct type_caster<xt::xarray<T, L>> : xtensor_type_caster_base<xt::xarray<T, L>>\n        {\n        };\n\n        // Type caster for casting xt::xtensor to ndarray\n        template <class T, std::size_t N, xt::layout_type L>\n        struct type_caster<xt::xtensor<T, N, L>> : xtensor_type_caster_base<xt::xtensor<T, N, L>>\n        {\n        };\n\n        // Type caster for casting xt::xtensor_fixed to ndarray\n        template <class T, class FSH, xt::layout_type L>\n        struct type_caster<xt::xtensor_fixed<T, FSH, L>> : xtensor_type_caster_base<xt::xtensor_fixed<T, FSH, L>>\n        {\n        };\n\n        // Type caster for casting xt::xstrided_view to ndarray\n        template <class CT, class S, xt::layout_type L, class FST>\n        struct type_caster<xt::xstrided_view<CT, S, L, FST>> : xtensor_type_caster_base<xt::xstrided_view<CT, S, L, FST>>\n        {\n        };\n\n        // Type caster for casting xt::xarray_adaptor to ndarray\n        template <class EC, xt::layout_type L, class SC, class Tag>\n        struct type_caster<xt::xarray_adaptor<EC, L, SC, 
Tag>> : xtensor_type_caster_base<xt::xarray_adaptor<EC, L, SC, Tag>>\n        {\n        };\n\n        // Type caster for casting xt::xtensor_adaptor to ndarray\n        template <class EC, std::size_t N, xt::layout_type L, class Tag>\n        struct type_caster<xt::xtensor_adaptor<EC, N, L, Tag>> : xtensor_type_caster_base<xt::xtensor_adaptor<EC, N, L, Tag>>\n        {\n        };\n    }\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pystrides_adaptor.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PYSTRIDES_ADAPTOR_HPP\n#define PYSTRIDES_ADAPTOR_HPP\n\n#include <cstddef>\n#include <iterator>\n\nnamespace xt\n{\n\n    template <std::size_t N>\n    class pystrides_iterator;\n\n    /*********************************\n     * pystrides_adaptor declaration *\n     *********************************/\n\n    template <std::size_t N>\n    class pystrides_adaptor\n    {\n    public:\n\n        using value_type = std::ptrdiff_t;\n        using const_reference = value_type;\n        using reference = const_reference;\n        using const_pointer = const value_type*;\n        using pointer = const_pointer;\n        using size_type = std::size_t;\n        using difference_type = std::ptrdiff_t;\n\n        using const_iterator = pystrides_iterator<N>;\n        using iterator = const_iterator;\n        using const_reverse_iterator = std::reverse_iterator<const_iterator>;\n        using reverse_iterator = const_reverse_iterator;\n\n        using shape_type = size_t*;\n\n        pystrides_adaptor() = default;\n        pystrides_adaptor(const_pointer data, size_type size, shape_type shape);\n\n        bool empty() const noexcept;\n        size_type size() const noexcept;\n\n        const_reference operator[](size_type i) const;\n\n        const_reference front() const;\n        const_reference back() const;\n\n        const_iterator 
begin() const;\n        const_iterator end() const;\n        const_iterator cbegin() const;\n        const_iterator cend() const;\n\n        const_reverse_iterator rbegin() const;\n        const_reverse_iterator rend() const;\n        const_reverse_iterator crbegin() const;\n        const_reverse_iterator crend() const;\n\n    private:\n\n        const_pointer p_data;\n        size_type m_size;\n        shape_type p_shape;\n    };\n\n    /**********************************\n     * pystrides_iterator declaration *\n     **********************************/\n\n    template <std::size_t N>\n    class pystrides_iterator\n    {\n    public:\n\n        using self_type = pystrides_iterator<N>;\n\n        using value_type = typename pystrides_adaptor<N>::value_type;\n        using pointer = typename pystrides_adaptor<N>::const_pointer;\n        using reference = typename pystrides_adaptor<N>::const_reference;\n        using difference_type = typename pystrides_adaptor<N>::difference_type;\n        using iterator_category = std::random_access_iterator_tag;\n        using shape_pointer = typename pystrides_adaptor<N>::shape_type;\n\n        pystrides_iterator() = default;\n\n        inline pystrides_iterator(pointer current, shape_pointer shape)\n            : p_current(current)\n            , p_shape(shape)\n        {\n        }\n\n        inline reference operator*() const\n        {\n            return *p_shape == size_t(1) ? 
0 : *p_current / N;\n        }\n\n        inline pointer operator->() const\n        {\n            // Returning the address of a temporary\n            value_type res = this->operator*();\n            return &res;\n        }\n\n        inline reference operator[](difference_type n) const\n        {\n            return *(p_current + n) / N;\n        }\n\n        inline self_type& operator++()\n        {\n            ++p_current;\n            ++p_shape;\n            return *this;\n        }\n\n        inline self_type& operator--()\n        {\n            --p_current;\n            --p_shape;\n            return *this;\n        }\n\n        inline self_type operator++(int)\n        {\n            self_type tmp(*this);\n            ++p_current;\n            ++p_shape;\n            return tmp;\n        }\n\n        inline self_type operator--(int)\n        {\n            self_type tmp(*this);\n            --p_current;\n            --p_shape;\n            return tmp;\n        }\n\n        inline self_type& operator+=(difference_type n)\n        {\n            p_current += n;\n            p_shape += n;\n            return *this;\n        }\n\n        inline self_type& operator-=(difference_type n)\n        {\n            p_current -= n;\n            p_shape -= n;\n            return *this;\n        }\n\n        inline self_type operator+(difference_type n) const\n        {\n            return self_type(p_current + n, p_shape + n);\n        }\n\n        inline self_type operator-(difference_type n) const\n        {\n            return self_type(p_current - n, p_shape - n);\n        }\n\n        inline difference_type operator-(const self_type& rhs) const\n        {\n            self_type tmp(*this);\n            return p_current - rhs.p_current;\n        }\n\n        pointer get_pointer() const { return p_current; }\n\n    private:\n\n        pointer p_current;\n        shape_pointer p_shape;\n    };\n\n    template <std::size_t N>\n    inline bool operator==(const 
pystrides_iterator<N>& lhs,\n                           const pystrides_iterator<N>& rhs)\n    {\n        return lhs.get_pointer() == rhs.get_pointer();\n    }\n\n    template <std::size_t N>\n    inline bool operator!=(const pystrides_iterator<N>& lhs,\n                           const pystrides_iterator<N>& rhs)\n    {\n        return !(lhs == rhs);\n    }\n\n    template <std::size_t N>\n    inline bool operator<(const pystrides_iterator<N>& lhs,\n                          const pystrides_iterator<N>& rhs)\n    {\n        return lhs.get_pointer() < rhs.get_pointer();\n    }\n\n    template <std::size_t N>\n    inline bool operator<=(const pystrides_iterator<N>& lhs,\n                           const pystrides_iterator<N>& rhs)\n    {\n        return (lhs < rhs) || (lhs == rhs);\n    }\n\n    template <std::size_t N>\n    inline bool operator>(const pystrides_iterator<N>& lhs,\n                          const pystrides_iterator<N>& rhs)\n    {\n        return !(lhs <= rhs);\n    }\n\n    template <std::size_t N>\n    inline bool operator>=(const pystrides_iterator<N>& lhs,\n                           const pystrides_iterator<N>& rhs)\n    {\n        return !(lhs < rhs);\n    }\n\n    /************************************\n     * pystrides_adaptor implementation *\n     ************************************/\n\n    template <std::size_t N>\n    inline pystrides_adaptor<N>::pystrides_adaptor(const_pointer data, size_type size, shape_type shape)\n        : p_data(data), m_size(size), p_shape(shape)\n    {\n    }\n\n    template <std::size_t N>\n    inline bool pystrides_adaptor<N>::empty() const noexcept\n    {\n        return m_size == 0;\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::size() const noexcept -> size_type\n    {\n        return m_size;\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::operator[](size_type i) const -> const_reference\n    {\n        return p_shape[i] == size_t(1) ? 
0 : p_data[i] / N;\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::front() const -> const_reference\n    {\n        return this->operator[](0);\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::back() const -> const_reference\n    {\n        return this->operator[](m_size - 1);\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::begin() const -> const_iterator\n    {\n        return cbegin();\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::end() const -> const_iterator\n    {\n        return cend();\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::cbegin() const -> const_iterator\n    {\n        return const_iterator(p_data, p_shape);\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::cend() const -> const_iterator\n    {\n        return const_iterator(p_data + m_size, p_shape + m_size);\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::rbegin() const -> const_reverse_iterator\n    {\n        return crbegin();\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::rend() const -> const_reverse_iterator\n    {\n        return crend();\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::crbegin() const -> const_reverse_iterator\n    {\n        return const_reverse_iterator(cend());\n    }\n\n    template <std::size_t N>\n    inline auto pystrides_adaptor<N>::crend() const -> const_reverse_iterator\n    {\n        return const_reverse_iterator(cbegin());\n    }\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pytensor.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PY_TENSOR_HPP\n#define PY_TENSOR_HPP\n\n#include <algorithm>\n#include <array>\n#include <cstddef>\n\n#include \"xtensor/containers/xbuffer_adaptor.hpp\"\n#include \"xtensor/core/xiterator.hpp\"\n#include \"xtensor/core/xsemantic.hpp\"\n#include \"xtensor/utils/xutils.hpp\"\n\n#include \"pycontainer.hpp\"\n#include \"pystrides_adaptor.hpp\"\n#include \"pynative_casters.hpp\"\n#include \"xtensor_type_caster_base.hpp\"\n#include \"xtensor_python_config.hpp\"\n\nnamespace xt\n{\n    template <class T, std::size_t N, layout_type L = layout_type::dynamic>\n    class pytensor;\n}\n\nnamespace pybind11\n{\n    namespace detail\n    {\n#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3\n        template <class T, std::size_t N, xt::layout_type L>\n        struct handle_type_name<xt::pytensor<T, N, L>>\n        {\n            static PYBIND11_DESCR name()\n            {\n                return _(\"numpy.ndarray[\") + npy_format_descriptor<T>::name() + _(\"]\");\n            }\n        };\n#endif\n\n        template <class T, std::size_t N, xt::layout_type L>\n        struct pyobject_caster<xt::pytensor<T, N, L>>\n        {\n            using type = xt::pytensor<T, N, L>;\n\n            bool load(handle src, bool convert)\n            {\n                if (!convert)\n                {\n                    if 
(!xt::detail::check_array<T>(src))\n                    {\n                        return false;\n                    }\n                }\n\n                try\n                {\n                    value = type::ensure(src);\n                }\n                catch (const std::runtime_error&)\n                {\n                    return false;\n                }\n                return static_cast<bool>(value);\n            }\n\n            static handle cast(const handle& src, return_value_policy, handle)\n            {\n                return src.inc_ref();\n            }\n\n#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3\n            PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name());\n#else\n            PYBIND11_TYPE_CASTER(type, _(\"numpy.ndarray[\") + npy_format_descriptor<T>::name + _(\"]\"));\n#endif\n        };\n\n        // Type caster for casting ndarray to xexpression<pytensor>\n        template <class T, std::size_t N, xt::layout_type L>\n        struct type_caster<xt::xexpression<xt::pytensor<T, N, L>>> : pyobject_caster<xt::pytensor<T, N, L>>\n        {\n            using Type = xt::xexpression<xt::pytensor<T, N, L>>;\n\n            operator Type&()\n            {\n                return this->value;\n            }\n\n            operator const Type&()\n            {\n                return this->value;\n            }\n        };\n\n    } // namespace detail\n}\n\nnamespace xt\n{\n    namespace detail {\n\n        template <std::size_t N>\n        struct numpy_strides\n        {\n            npy_intp value[N];\n        };\n\n        template <>\n        struct numpy_strides<0>\n        {\n            npy_intp* value = nullptr;\n        };\n\n    } // namespace detail\n\n    template <class T, std::size_t N, layout_type L>\n    struct xiterable_inner_types<pytensor<T, N, L>>\n        : xcontainer_iterable_types<pytensor<T, N, L>>\n    {\n    };\n\n    template <class T, std::size_t N, layout_type L>\n    struct 
xcontainer_inner_types<pytensor<T, N, L>>\n    {\n        using storage_type = xbuffer_adaptor<T*>;\n        using reference = typename storage_type::reference;\n        using const_reference = typename storage_type::const_reference;\n        using size_type = typename storage_type::size_type;\n        using shape_type = std::array<npy_intp, N>;\n        using strides_type = shape_type;\n        using backstrides_type = shape_type;\n        using inner_shape_type = shape_type;\n        using inner_strides_type = strides_type;\n        using inner_backstrides_type = backstrides_type;\n        using temporary_type = pytensor<T, N, L>;\n        static constexpr layout_type layout = L;\n    };\n\n    /**\n     * @class pytensor\n     * @brief Multidimensional container providing the xtensor container semantics wrapping a numpy array.\n     *\n     * pytensor is similar to the xtensor container in that it has a static dimensionality.\n     *\n     * Unlike the pyarray container, pytensor cannot be reshaped with a different number of dimensions\n     * and reshapes are not reflected on the Python side. However, pytensor has benefits compared to pyarray\n     * in terms of performances. 
pytensor shapes are stack-allocated which makes iteration upon pytensor\n     * faster than with pyarray.\n     *\n     * @tparam T The type of the element stored in the pyarray.\n     * @sa pyarray\n     */\n    template <class T, std::size_t N, layout_type L>\n    class pytensor : public pycontainer<pytensor<T, N, L>>,\n                     public xcontainer_semantic<pytensor<T, N, L>>\n    {\n    public:\n\n        using self_type = pytensor<T, N, L>;\n        using semantic_base = xcontainer_semantic<self_type>;\n        using base_type = pycontainer<self_type>;\n        using storage_type = typename base_type::storage_type;\n        using value_type = typename base_type::value_type;\n        using reference = typename base_type::reference;\n        using const_reference = typename base_type::const_reference;\n        using pointer = typename base_type::pointer;\n        using size_type = typename base_type::size_type;\n        using shape_type = typename base_type::shape_type;\n        using strides_type = typename base_type::strides_type;\n        using backstrides_type = typename base_type::backstrides_type;\n        using inner_shape_type = typename base_type::inner_shape_type;\n        using inner_strides_type = typename base_type::inner_strides_type;\n        using inner_backstrides_type = typename base_type::inner_backstrides_type;\n        constexpr static std::size_t rank = N;\n\n        pytensor();\n        pytensor(nested_initializer_list_t<T, N> t);\n        pytensor(pybind11::handle h, pybind11::object::borrowed_t);\n        pytensor(pybind11::handle h, pybind11::object::stolen_t);\n        pytensor(const pybind11::object& o);\n\n        explicit pytensor(const shape_type& shape, layout_type l = layout_type::row_major);\n        explicit pytensor(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);\n        explicit pytensor(const shape_type& shape, const strides_type& strides, const_reference value);\n        
explicit pytensor(const shape_type& shape, const strides_type& strides);\n\n        template <class S = shape_type>\n        static pytensor from_shape(S&& shape);\n\n        pytensor(const self_type& rhs);\n        self_type& operator=(const self_type& rhs);\n\n        pytensor(self_type&&) = default;\n        self_type& operator=(self_type&& e) = default;\n\n        template <class E>\n        pytensor(const xexpression<E>& e);\n\n        template <class E>\n        self_type& operator=(const xexpression<E>& e);\n\n        using base_type::begin;\n        using base_type::end;\n\n        static self_type ensure(pybind11::handle h);\n        static bool check_(pybind11::handle h);\n\n#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || (PYBIND11_VERSION_MAJOR >= 3)\n        // Prevent ambiguous overload resolution for operators defined for\n        // both xt::xcontainer_semantic and pybind11::object.\n        using semantic_base::operator+=;\n        using semantic_base::operator-=;\n        using semantic_base::operator*=;\n        using semantic_base::operator/=;\n        using semantic_base::operator|=;\n        using semantic_base::operator&=;\n        using semantic_base::operator^=;\n        // using semantic_base::operator<<=;\n        // using semantic_base::operator>>=;\n#endif\n\n    private:\n\n        inner_shape_type m_shape;\n        inner_strides_type m_strides;\n        inner_backstrides_type m_backstrides;\n        storage_type m_storage;\n\n        void init_tensor(const shape_type& shape, const strides_type& strides);\n        void init_from_python();\n\n        inner_shape_type& shape_impl() noexcept;\n        const inner_shape_type& shape_impl() const noexcept;\n        inner_strides_type& strides_impl() noexcept;\n        const inner_strides_type& strides_impl() const noexcept;\n        inner_backstrides_type& backstrides_impl() noexcept;\n        const inner_backstrides_type& backstrides_impl() const noexcept;\n\n        
storage_type& storage_impl() noexcept;\n        const storage_type& storage_impl() const noexcept;\n\n        friend class xcontainer<pytensor<T, N, L>>;\n        friend class pycontainer<pytensor<T, N, L>>;\n    };\n\n    /***************************\n     * pytensor implementation *\n     ***************************/\n\n    /**\n     * @name Constructors\n     */\n    //@{\n    /**\n     * Allocates an uninitialized pytensor that holds 1 element.\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor()\n        : base_type()\n    {\n        m_shape = xtl::make_sequence<shape_type>(N, size_type(1));\n        m_strides = xtl::make_sequence<strides_type>(N, size_type(0));\n        init_tensor(m_shape, m_strides);\n        detail::default_initialize(m_storage);\n    }\n\n    /**\n     * Allocates a pytensor with a nested initializer list.\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(nested_initializer_list_t<T, N> t)\n        : base_type()\n    {\n        base_type::resize(xt::shape<shape_type>(t), layout_type::row_major);\n        nested_copy(m_storage.begin(), t);\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(pybind11::handle h, pybind11::object::borrowed_t b)\n        : base_type(h, b)\n    {\n        init_from_python();\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(pybind11::handle h, pybind11::object::stolen_t s)\n        : base_type(h, s)\n    {\n        init_from_python();\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const pybind11::object& o)\n        : base_type(o)\n    {\n        init_from_python();\n    }\n\n    /**\n     * Allocates an uninitialized pytensor with the specified shape and\n     * layout.\n     * @param shape the shape of the pytensor\n     * @param l the layout_type 
of the pytensor\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const shape_type& shape, layout_type l)\n    {\n        compute_strides(shape, l, m_strides);\n        init_tensor(shape, m_strides);\n    }\n\n    /**\n     * Allocates a pytensor with the specified shape and layout. Elements\n     * are initialized to the specified value.\n     * @param shape the shape of the pytensor\n     * @param value the value of the elements\n     * @param l the layout_type of the pytensor\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const shape_type& shape,\n                                    const_reference value,\n                                    layout_type l)\n    {\n        compute_strides(shape, l, m_strides);\n        init_tensor(shape, m_strides);\n        std::fill(m_storage.begin(), m_storage.end(), value);\n    }\n\n    /**\n     * Allocates an uninitialized pytensor with the specified shape and strides.\n     * Elements are initialized to the specified value.\n     * @param shape the shape of the pytensor\n     * @param strides the strides of the pytensor\n     * @param value the value of the elements\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const shape_type& shape,\n                                    const strides_type& strides,\n                                    const_reference value)\n    {\n        init_tensor(shape, strides);\n        std::fill(m_storage.begin(), m_storage.end(), value);\n    }\n\n    /**\n     * Allocates an uninitialized pytensor with the specified shape and strides.\n     * @param shape the shape of the pytensor\n     * @param strides the strides of the pytensor\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const shape_type& shape,\n                                    const strides_type& strides)\n    {\n  
      init_tensor(shape, strides);\n    }\n\n    /**\n     * Allocates and returns an pytensor with the specified shape.\n     * @param shape the shape of the pytensor\n     */\n    template <class T, std::size_t N, layout_type L>\n    template <class S>\n    inline pytensor<T, N, L> pytensor<T, N, L>::from_shape(S&& shape)\n    {\n        detail::check_dims<shape_type>::run(shape.size());\n        auto shp = xtl::forward_sequence<shape_type, S>(shape);\n        return self_type(shp);\n    }\n    //@}\n\n    /**\n     * @name Copy semantic\n     */\n    //@{\n    /**\n     * The copy constructor.\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline pytensor<T, N, L>::pytensor(const self_type& rhs)\n        : base_type(), semantic_base(rhs)\n    {\n        init_tensor(rhs.shape(), rhs.strides());\n        std::copy(rhs.storage().cbegin(), rhs.storage().cend(), this->storage().begin());\n    }\n\n    /**\n     * The assignment operator.\n     */\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::operator=(const self_type& rhs) -> self_type&\n    {\n        self_type tmp(rhs);\n        *this = std::move(tmp);\n        return *this;\n    }\n    //@}\n\n    /**\n     * @name Extended copy semantic\n     */\n    //@{\n    /**\n     * The extended copy constructor.\n     */\n    template <class T, std::size_t N, layout_type L>\n    template <class E>\n    inline pytensor<T, N, L>::pytensor(const xexpression<E>& e)\n        : base_type()\n    {\n        shape_type shape = xtl::forward_sequence<shape_type, decltype(e.derived_cast().shape())>(e.derived_cast().shape());\n        strides_type strides = xtl::make_sequence<strides_type>(N, size_type(0));\n        compute_strides(shape, layout_type::row_major, strides);\n        init_tensor(shape, strides);\n        semantic_base::assign(e);\n    }\n\n    /**\n     * The extended assignment operator.\n     */\n    template <class T, std::size_t N, layout_type L>\n    
template <class E>\n    inline auto pytensor<T, N, L>::operator=(const xexpression<E>& e) -> self_type&\n    {\n        return semantic_base::operator=(e);\n    }\n    //@}\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::ensure(pybind11::handle h) -> self_type\n    {\n        return base_type::ensure(h);\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline bool pytensor<T, N, L>::check_(pybind11::handle h)\n    {\n        return base_type::check_(h);\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline void pytensor<T, N, L>::init_tensor(const shape_type& shape, const strides_type& strides)\n    {\n        detail::numpy_strides<N> python_strides;\n        std::transform(strides.begin(), strides.end(), python_strides.value,\n                       [](auto v) { return sizeof(T) * v; });\n        int flags = NPY_ARRAY_ALIGNED;\n        if (!std::is_const<T>::value)\n        {\n            flags |= NPY_ARRAY_WRITEABLE;\n        }\n        auto dtype = pybind11::detail::npy_format_descriptor<T>::dtype();\n\n        auto tmp = pybind11::reinterpret_steal<pybind11::object>(\n            PyArray_NewFromDescr(&PyArray_Type, (PyArray_Descr*) dtype.release().ptr(), static_cast<int>(shape.size()),\n                        const_cast<npy_intp*>(shape.data()), python_strides.value,\n                        nullptr, flags, nullptr));\n\n        if (!tmp)\n        {\n            throw std::runtime_error(\"NumPy: unable to create ndarray\");\n        }\n\n        this->m_ptr = tmp.release().ptr();\n        m_shape = shape;\n        m_strides = strides;\n        adapt_strides(m_shape, m_strides, m_backstrides);\n        m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),\n                                 static_cast<size_type>(PyArray_SIZE(this->python_array())));\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline void pytensor<T, N, 
L>::init_from_python()\n    {\n        if (!static_cast<bool>(*this))\n        {\n            return;\n        }\n\n        if (PyArray_NDIM(this->python_array()) != N)\n        {\n            throw std::runtime_error(\"NumPy: ndarray has incorrect number of dimensions\");\n        }\n\n        std::copy(PyArray_DIMS(this->python_array()), PyArray_DIMS(this->python_array()) + N, m_shape.begin());\n        std::transform(PyArray_STRIDES(this->python_array()), PyArray_STRIDES(this->python_array()) + N, m_strides.begin(),\n                       [](auto v) { return v / sizeof(T); });\n        adapt_strides(m_shape, m_strides, m_backstrides);\n\n        if (L != layout_type::dynamic && !do_strides_match(m_shape, m_strides, L, 1))\n        {\n            throw std::runtime_error(\"NumPy: passing container with bad strides for layout (is it a view?).\");\n        }\n\n        m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),\n                                 this->get_buffer_size());\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::shape_impl() noexcept -> inner_shape_type&\n    {\n        return m_shape;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::shape_impl() const noexcept -> const inner_shape_type&\n    {\n        return m_shape;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::strides_impl() noexcept -> inner_strides_type&\n    {\n        return m_strides;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::strides_impl() const noexcept -> const inner_strides_type&\n    {\n        return m_strides;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::backstrides_impl() noexcept -> inner_backstrides_type&\n    {\n        return m_backstrides;\n    }\n\n    template <class T, std::size_t 
N, layout_type L>\n    inline auto pytensor<T, N, L>::backstrides_impl() const noexcept -> const inner_backstrides_type&\n    {\n        return m_backstrides;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::storage_impl() noexcept -> storage_type&\n    {\n        return m_storage;\n    }\n\n    template <class T, std::size_t N, layout_type L>\n    inline auto pytensor<T, N, L>::storage_impl() const noexcept -> const storage_type&\n    {\n        return m_storage;\n    }\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/pyvectorize.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef PY_VECTORIZE_HPP\n#define PY_VECTORIZE_HPP\n\n#include <type_traits>\n\n#include \"pyarray.hpp\"\n#include \"xtensor/core/xvectorize.hpp\"\n\nnamespace xt\n{\n\n    template <class Func, class R, class... Args>\n    struct pyvectorizer\n    {\n        xvectorizer<Func, R> m_vectorizer;\n\n        template <class F, class = std::enable_if_t<!std::is_same<std::decay_t<F>, pyvectorizer>::value>>\n        pyvectorizer(F&& func)\n            : m_vectorizer(std::forward<F>(func))\n        {\n        }\n\n        inline pyarray<R> operator()(const pyarray<Args>&... args) const\n        {\n            pyarray<R> res = m_vectorizer(args...);\n            return res;\n        }\n    };\n\n    /**\n     * @brief Create numpy universal function from scalar function.\n     */\n    template <class R, class... Args>\n    inline pyvectorizer<R (*)(Args...), R, Args...> pyvectorize(R (*f)(Args...))\n    {\n        return pyvectorizer<R (*)(Args...), R, Args...>(f);\n    }\n\n    /// @cond DOXYGEN_INCLUDE_OVERLOADS\n    template <class F, class R, class... 
Args>\n    inline pyvectorizer<F, R, Args...> pyvectorize(F&& f, R (*)(Args...))\n    {\n        return pyvectorizer<F, R, Args...>(std::forward<F>(f));\n    }\n\n    template <class F>\n    inline auto pyvectorize(F&& f) -> decltype(pyvectorize(std::forward<F>(f), (detail::get_function_type<F>*)nullptr))\n    {\n        return pyvectorize(std::forward<F>(f), (detail::get_function_type<F>*)nullptr);\n    }\n    /// @endcond\n}\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/xtensor_python_config.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef XTENSOR_PYTHON_CONFIG_HPP\n#define XTENSOR_PYTHON_CONFIG_HPP\n\n#define XTENSOR_PYTHON_VERSION_MAJOR 0\n#define XTENSOR_PYTHON_VERSION_MINOR 29\n#define XTENSOR_PYTHON_VERSION_PATCH 0\n\n#endif\n"
  },
  {
    "path": "include/xtensor-python/xtensor_type_caster_base.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef XTENSOR_TYPE_CASTER_HPP\n#define XTENSOR_TYPE_CASTER_HPP\n\n#include <cstddef>\n#include <algorithm>\n#include <vector>\n\n#include \"xtensor/containers/xtensor.hpp\"\n#include \"xtensor/containers/xfixed.hpp\"\n\n#include <pybind11/numpy.h>\n#include <pybind11/pybind11.h>\n\nnamespace pybind11\n{\n    namespace detail\n    {\n        template <typename T, xt::layout_type L>\n        struct pybind_array_getter_impl\n        {\n            static auto run(handle src)\n            {\n                return array_t<T, array::c_style | array::forcecast>::ensure(src);\n            }\n        };\n\n        template <typename T>\n        struct pybind_array_getter_impl<T, xt::layout_type::column_major>\n        {\n            static auto run(handle src)\n            {\n                return array_t<T, array::f_style | array::forcecast>::ensure(src);\n            }\n        };\n\n        template <class T>\n        struct pybind_array_getter\n        {\n        };\n\n        template <class T, xt::layout_type L>\n        struct pybind_array_getter<xt::xarray<T, L>>\n        {\n            static auto run(handle src)\n            {\n                return pybind_array_getter_impl<T, L>::run(src);\n            }\n        };\n\n        template <class T, std::size_t N, xt::layout_type L>\n        struct 
pybind_array_getter<xt::xtensor<T, N, L>>\n        {\n            static auto run(handle src)\n            {\n                return pybind_array_getter_impl<T, L>::run(src);\n            }\n        };\n\n        template <class T, class FSH, xt::layout_type L>\n        struct pybind_array_getter<xt::xtensor_fixed<T, FSH, L>>\n        {\n            static auto run(handle src)\n            {\n                return pybind_array_getter_impl<T, L>::run(src);\n            }\n        };\n\n        template <class CT, class S, xt::layout_type L, class FST>\n        struct pybind_array_getter<xt::xstrided_view<CT, S, L, FST>>\n        {\n            static auto run(handle /*src*/)\n            {\n                return false;\n            }\n        };\n\n        template <class EC, xt::layout_type L, class SC, class Tag>\n        struct pybind_array_getter<xt::xarray_adaptor<EC, L, SC, Tag>>\n        {\n            static auto run(handle src)\n            {\n                auto buf = pybind_array_getter_impl<EC, L>::run(src);\n                return buf;\n            }\n        };\n\n        template <class EC, std::size_t N, xt::layout_type L, class Tag>\n        struct pybind_array_getter<xt::xtensor_adaptor<EC, N, L, Tag>>\n        {\n            static auto run(handle /*src*/)\n            {\n                return false;\n            }\n        };\n\n\n        template <class T>\n        struct pybind_array_dim_checker\n        {\n            template <class B>\n            static bool run(const B& /*buf*/)\n            {\n                return true;\n            }\n        };\n\n        template <class T, std::size_t N, xt::layout_type L>\n        struct pybind_array_dim_checker<xt::xtensor<T, N, L>>\n        {\n            template <class B>\n            static bool run(const B& buf)\n            {\n                return buf.ndim() == N;\n            }\n        };\n\n        template <class T, class FSH, xt::layout_type L>\n        struct 
pybind_array_dim_checker<xt::xtensor_fixed<T, FSH, L>>\n        {\n            template <class B>\n            static bool run(const B& buf)\n            {\n                return buf.ndim() == FSH::size();\n            }\n        };\n\n\n        template <class T>\n        struct pybind_array_shape_checker\n        {\n            template <class B>\n            static bool run(const B& /*buf*/)\n            {\n                return true;\n            }\n        };\n\n        template <class T, class FSH, xt::layout_type L>\n        struct pybind_array_shape_checker<xt::xtensor_fixed<T, FSH, L>>\n        {\n            template <class B>\n            static bool run(const B& buf)\n            {\n                auto shape = FSH();\n                return std::equal(shape.begin(), shape.end(), buf.shape());\n            }\n        };\n\n        // Casts a strided expression type to numpy array.If given a base,\n        // the numpy array references the src data, otherwise it'll make a copy.\n        // The writeable attributes lets you specify writeable flag for the array.\n        template <typename Type>\n        handle xtensor_array_cast(const Type& src, handle base = handle(), bool writeable = true)\n        {\n            // TODO: make use of xt::pyarray instead of array.\n            std::vector<std::size_t> python_strides(src.strides().size());\n            std::transform(src.strides().begin(), src.strides().end(),\n                           python_strides.begin(), [](auto v) {\n                return sizeof(typename Type::value_type) * v;\n            });\n\n            std::vector<std::size_t> python_shape(src.shape().size());\n            std::copy(src.shape().begin(), src.shape().end(), python_shape.begin());\n\n            array a(python_shape, python_strides, &*(src.begin()), base);\n\n            if (!writeable)\n            {\n                array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;\n            }\n\n            return 
a.release();\n        }\n\n        // Takes an lvalue ref to some strided expression type and a (python) base object, creating a numpy array that\n        // reference the expression object's data with `base` as the python-registered base class (if omitted,\n        // the base will be set to None, and lifetime management is up to the caller).  The numpy array is\n        // non-writeable if the given type is const.\n        template <typename Type, typename CType>\n        handle xtensor_ref_array(CType& src, handle parent = none())\n        {\n            return xtensor_array_cast<Type>(src, parent, !std::is_const<CType>::value);\n        }\n\n        // Takes a pointer to a strided expression, builds a capsule around it, then returns a numpy\n        // array that references the encapsulated data with a python-side reference to the capsule to tie\n        // its destruction to that of any dependent python objects.  Const-ness is determined by whether or\n        // not the CType of the pointer given is const.\n        template <typename Type, typename CType>\n        handle xtensor_encapsulate(CType* src)\n        {\n            capsule base(src, [](void* o) { delete static_cast<CType*>(o); });\n            return xtensor_ref_array<Type>(*src, base);\n        }\n\n        // Base class of type_caster for strided expressions\n        template <class Type>\n        struct xtensor_type_caster_base\n        {\n\n        private:\n\n            // Cast implementation\n            template <typename CType>\n            static handle cast_impl(CType* src, return_value_policy policy, handle parent)\n            {\n                switch (policy)\n                {\n                case return_value_policy::take_ownership:\n                case return_value_policy::automatic:\n                    return xtensor_encapsulate<Type>(src);\n                case return_value_policy::move:\n                    return xtensor_encapsulate<Type>(new CType(std::move(*src)));\n      
          case return_value_policy::copy:\n                    return xtensor_array_cast<Type>(*src);\n                case return_value_policy::reference:\n                case return_value_policy::automatic_reference:\n                    return xtensor_ref_array<Type>(*src);\n                case return_value_policy::reference_internal:\n                    return xtensor_ref_array<Type>(*src, parent);\n                default:\n                    throw cast_error(\"unhandled return_value_policy: should not happen!\");\n                };\n            }\n\n        public:\n\n            PYBIND11_TYPE_CASTER(Type, _(\"numpy.ndarray[\") + npy_format_descriptor<typename Type::value_type>::name + _(\"]\"));\n\n            bool load(handle src, bool convert)\n            {\n                using T = typename Type::value_type;\n\n                if (!convert && !array_t<T>::check_(src))\n                {\n                    return false;\n                }\n\n                auto buf = pybind_array_getter<Type>::run(src);\n\n                if (!buf)\n                {\n                    return false;\n                }\n                if (!pybind_array_dim_checker<Type>::run(buf))\n                {\n                    return false;\n                }\n\n                if (!pybind_array_shape_checker<Type>::run(buf))\n                {\n                    return false;\n                }\n\n                std::vector<size_t> shape(buf.ndim());\n                std::copy(buf.shape(), buf.shape() + buf.ndim(), shape.begin());\n                value = Type::from_shape(shape);\n                std::copy(buf.data(), buf.data() + buf.size(), value.data());\n\n                return true;\n            }\n\n            // Normal returned non-reference, non-const value:\n            static handle cast(Type&& src, return_value_policy /* policy */, handle parent)\n            {\n                return cast_impl(&src, return_value_policy::move, parent);\n            
}\n\n            // If you return a non-reference const, we mark the numpy array readonly:\n            static handle cast(const Type&& src, return_value_policy /* policy */, handle parent)\n            {\n                return cast_impl(&src, return_value_policy::move, parent);\n            }\n\n            // lvalue reference return; default (automatic) becomes copy\n            static handle cast(Type& src, return_value_policy policy, handle parent)\n            {\n                if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)\n                {\n                    policy = return_value_policy::copy;\n                }\n\n                return cast_impl(&src, policy, parent);\n            }\n\n            // const lvalue reference return; default (automatic) becomes copy\n            static handle cast(const Type& src, return_value_policy policy, handle parent)\n            {\n                if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)\n                {\n                    policy = return_value_policy::copy;\n                }\n\n                return cast(&src, policy, parent);\n            }\n\n            // non-const pointer return\n            static handle cast(Type* src, return_value_policy policy, handle parent)\n            {\n                return cast_impl(src, policy, parent);\n            }\n\n            // const pointer return\n            static handle cast(const Type* src, return_value_policy policy, handle parent)\n            {\n                return cast_impl(src, policy, parent);\n            }\n        };\n    }\n}\n\n#endif\n"
  },
  {
    "path": "readthedocs.yml",
    "content": "version: 2\n\nbuild:\n  os: \"ubuntu-22.04\"\n  tools:\n    python: \"mambaforge-22.9\"\n\nsphinx:\n  # Path to Sphinx configuration file\n  configuration: docs/source/conf.py\n\nconda:\n  environment: docs/environment.yml\n"
  },
  {
    "path": "test/CMakeLists.txt",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\ncmake_minimum_required(VERSION 3.29)\n\nif (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)\n    project(xtensor-python-test)\n\n    find_package(pybind11 REQUIRED)\n    set(PYBIND11_INCLUDE_DIR ${pybind11_INCLUDE_DIRS})\n\n    find_package(xtensor REQUIRED CONFIG)\n    set(XTENSOR_INCLUDE_DIR ${xtensor_INCLUDE_DIRS})\n\n    find_package(xtensor-python REQUIRED CONFIG)\n    set(XTENSOR_PYTHON_INCLUDE_DIR ${xtensor-python_INCLUDE_DIRS})\nendif ()\n\nmessage(STATUS \"Forcing tests build type to Release\")\nset(CMAKE_BUILD_TYPE Release CACHE STRING \"Choose the type of build.\" FORCE)\n\ninclude(CheckCXXCompilerFlag)\n\nstring(TOUPPER \"${CMAKE_BUILD_TYPE}\" U_CMAKE_BUILD_TYPE)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++20\")\n\nif(MSVC)\n    set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} /EHsc /MP /bigobj\")\n    set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO)\nendif()\n\nif (DOWNLOAD_GTEST OR GTEST_SRC_DIR)\n    if(DOWNLOAD_GTEST)\n        # Download and unpack googletest at configure time\n        configure_file(downloadGTest.cmake.in googletest-download/CMakeLists.txt)\n    else()\n        # Copy local source of googletest at configure time\n        configure_file(copyGTest.cmake.in googletest-download/CMakeLists.txt)\n    endif()\n    execute_process(COMMAND ${CMAKE_COMMAND} -G \"${CMAKE_GENERATOR}\" .\n                    
RESULT_VARIABLE result\n                    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download )\n    if(result)\n        message(FATAL_ERROR \"CMake step for googletest failed: ${result}\")\n    endif()\n    execute_process(COMMAND ${CMAKE_COMMAND} --build .\n                    RESULT_VARIABLE result\n                    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download )\n    if(result)\n        message(FATAL_ERROR \"Build step for googletest failed: ${result}\")\n    endif()\n\n    set(gtest_force_shared_crt ON CACHE BOOL \"\" FORCE)\n\n    # Add googletest directly to our build. This defines\n    # the gtest and gtest_main targets.\n    add_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/googletest-src\n                     ${CMAKE_CURRENT_BINARY_DIR}/googletest-build EXCLUDE_FROM_ALL)\n\n    set(GTEST_INCLUDE_DIRS \"${gtest_SOURCE_DIR}/include\")\n    set(GTEST_BOTH_LIBRARIES gtest_main gtest)\nelse()\n    find_package(GTest REQUIRED)\nendif()\n\nfind_package(Threads)\n\ninclude_directories(${GTEST_INCLUDE_DIRS})\n\nset(XTENSOR_PYTHON_TESTS\n    main.cpp\n    test_pyarray.cpp\n    test_pyarray_traits.cpp\n    test_pytensor.cpp\n    test_pyvectorize.cpp\n    test_sfinae.cpp\n)\n\nadd_executable(test_xtensor_python ${XTENSOR_PYTHON_TESTS} ${XTENSOR_PYTHON_HEADERS})\ntarget_link_libraries(test_xtensor_python xtensor-python ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${PYTHON_LIBRARIES})\n\nif(DOWNLOAD_GTEST OR GTEST_SRC_DIR)\n    add_dependencies(test_xtensor_python gtest_main)\nendif()\n\nadd_custom_target(xtest COMMAND ./test_xtensor_python DEPENDS test_xtensor_python)\n\n"
  },
  {
    "path": "test/copyGTest.cmake.in",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\ncmake_minimum_required(VERSION 3.29)\n\nproject(googletest-download NONE)\n\ninclude(ExternalProject)\nExternalProject_Add(googletest\n    URL               \"${GTEST_SRC_DIR}\"\n    SOURCE_DIR        \"${CMAKE_CURRENT_BINARY_DIR}/googletest-src\"\n    BINARY_DIR        \"${CMAKE_CURRENT_BINARY_DIR}/googletest-build\"\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n)\n\n"
  },
  {
    "path": "test/downloadGTest.cmake.in",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\ncmake_minimum_required(VERSION 3.29)\n\nproject(googletest-download NONE)\n\ninclude(ExternalProject)\nExternalProject_Add(googletest\n    GIT_REPOSITORY    https://github.com/google/googletest.git\n    GIT_TAG           v1.16.0\n    SOURCE_DIR        \"${CMAKE_CURRENT_BINARY_DIR}/googletest-src\"\n    BINARY_DIR        \"${CMAKE_CURRENT_BINARY_DIR}/googletest-build\"\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND     \"\"\n    INSTALL_COMMAND   \"\"\n    TEST_COMMAND      \"\"\n)\n\n"
  },
  {
    "path": "test/main.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n// Required to avoid the error \"std does not have member copysign\"\n#include <cmath>\n\n#include \"gtest/gtest.h\"\n\n#include <pybind11/embed.h>\n\n#define FORCE_IMPORT_ARRAY\n#include \"xtensor-python/pyarray.hpp\"\n\nnamespace py = pybind11;\n\nint main(int argc, char* argv[])\n{\n    // Initialize all the things (Python, numpy, gtest)\n    py::scoped_interpreter guard{};\n    xt::import_numpy();\n    ::testing::InitGoogleTest(&argc, argv);\n\n    // Run test suite\n    int ret = RUN_ALL_TESTS();\n\n    // Return test results\n    return ret;\n}\n\n"
  },
  {
    "path": "test/test_common.hpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#ifndef TEST_COMMON_HPP\n#define TEST_COMMON_HPP\n\n#include \"xtensor/core/xlayout.hpp\"\n#include \"xtensor/misc/xmanipulation.hpp\"\n\n#include \"xtl/xsequence.hpp\"\n\nnamespace xt\n{\n    template <class T, class A>\n    bool operator==(const uvector<T, A>& lhs, const std::vector<T, A>& rhs)\n    {\n        return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());\n    }\n\n    template <class T, class A>\n    bool operator==(const std::vector<T, A>& lhs, const uvector<T, A>& rhs)\n    {\n        return rhs == lhs;\n    }\n\n    template <class C = std::vector<std::size_t>>\n    struct layout_result\n    {\n        using vector_type = uvector<int>;\n        using size_type = typename C::value_type;\n        using shape_type = C;\n        using strides_type = get_strides_t<shape_type>;\n\n        using assigner_type = std::vector<std::vector<vector_type>>;\n\n        inline layout_result()\n        {\n            m_shape = {3, 2, 4};\n            m_assigner.resize(m_shape[0]);\n            for (size_type i = 0; i < m_shape[0]; ++i)\n            {\n                m_assigner[i].resize(m_shape[1]);\n            }\n            m_assigner[0][0] = {-1, 1, 2, 3};\n            m_assigner[0][1] = {4, 5, 6, 7};\n            m_assigner[1][0] = {8, 9, 10, 11};\n            m_assigner[1][1] = {12, 13, 14, 15};\n      
      m_assigner[2][0] = {16, 17, 18, 19};\n            m_assigner[2][1] = {20, 21, 22, 23};\n        }\n\n        shape_type m_shape;\n        strides_type m_strides;\n        strides_type m_backstrides;\n        vector_type m_data;\n        layout_type m_layout;\n        assigner_type m_assigner;\n\n        inline size_type size() const { return m_data.size(); }\n        inline const shape_type& shape() const { return m_shape; }\n        inline const strides_type& strides() const { return m_strides; }\n        inline const strides_type& backstrides() const { return m_backstrides; }\n        inline layout_type layout() const { return m_layout; }\n        inline const vector_type& data() const { return m_data; }\n    };\n\n    template <class C = std::vector<std::size_t>>\n    struct row_major_result : layout_result<C>\n    {\n        inline row_major_result()\n        {\n            this->m_strides = {8, 4, 1};\n            this->m_backstrides = {16, 4, 3};\n            this->m_data = {-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n                            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n                            20, 21, 22, 23};\n            this->m_layout = layout_type::row_major;\n        }\n    };\n\n    template <class C = std::vector<std::size_t>>\n    struct column_major_result : layout_result<C>\n    {\n        inline column_major_result()\n        {\n            this->m_strides = {1, 3, 6};\n            this->m_backstrides = {2, 3, 18};\n            this->m_data = {-1, 8, 16, 4, 12, 20,\n                             1, 9, 17, 5, 13, 21,\n                             2, 10, 18, 6, 14, 22,\n                             3, 11, 19, 7, 15, 23};\n            this->m_layout = layout_type::column_major;\n        }\n    };\n\n    template <class C = std::vector<std::size_t>>\n    struct central_major_result : layout_result<C>\n    {\n        inline central_major_result()\n        {\n            this->m_strides = {8, 1, 2};\n            this->m_backstrides = {16, 1, 
6};\n            this->m_data = {-1, 4, 1, 5, 2, 6, 3, 7,\n                            8, 12, 9, 13, 10, 14, 11, 15,\n                            16, 20, 17, 21, 18, 22, 19, 23};\n            this->m_layout = layout_type::dynamic;\n        }\n    };\n\n    template <class C = std::vector<std::size_t>>\n    struct unit_shape_result\n    {\n        using vector_type = std::vector<int>;\n        using size_type = typename C::value_type;\n        using shape_type = C;\n        using strides_type = C;\n\n        using assigner_type = std::vector<std::vector<vector_type>>;\n\n        inline unit_shape_result()\n        {\n            m_shape = {3, 1, 4};\n            m_strides = {4, 0, 1};\n            m_backstrides = {8, 0, 3};\n            m_data = {-1, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19};\n            m_layout = layout_type::dynamic;\n            m_assigner.resize(m_shape[0]);\n            for (std::size_t i = 0; i < std::size_t(m_shape[0]); ++i)\n            {\n                m_assigner[i].resize(m_shape[1]);\n            }\n            m_assigner[0][0] = {-1, 1, 2, 3};\n            m_assigner[1][0] = {8, 9, 10, 11};\n            m_assigner[2][0] = {16, 17, 18, 19};\n        }\n\n        shape_type m_shape;\n        strides_type m_strides;\n        strides_type m_backstrides;\n        vector_type m_data;\n        layout_type m_layout;\n        assigner_type m_assigner;\n\n        inline size_type size() const { return m_data.size(); }\n        inline const shape_type& shape() const { return m_shape; }\n        inline const strides_type& strides() const { return m_strides; }\n        inline const strides_type& backstrides() const { return m_backstrides; }\n        inline layout_type layout() const { return m_layout; }\n        inline const vector_type& data() const { return m_data; }\n    };\n\n    template <class V, class R>\n    void compare_shape(V& vec, const R& result, bool compare_layout = true)\n    {\n        EXPECT_TRUE(std::equal(vec.shape().cbegin(), 
vec.shape().cend(), result.shape().cbegin()));\n        EXPECT_TRUE(std::equal(vec.strides().cbegin(), vec.strides().cend(), result.strides().cbegin()));\n// TODO: check why this does not build on modern MSVC compilers\n#ifndef WIN32\n        EXPECT_TRUE(std::equal(vec.backstrides().cbegin(), vec.backstrides().cend(), result.backstrides().cbegin()));\n#endif\n        EXPECT_EQ(vec.size(), result.size());\n        if (compare_layout)\n        {\n            EXPECT_EQ(vec.layout(), result.layout());\n        }\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_resize(V& vec)\n    {\n        {\n            SCOPED_TRACE(\"row_major resize\");\n            row_major_result<C> rm;\n            vec.resize(rm.m_shape, layout_type::row_major);\n            compare_shape(vec, rm);\n        }\n\n        {\n            SCOPED_TRACE(\"different types resize\");\n            row_major_result<C> rm;\n            auto v_copy_a = vec;\n            auto v_copy_b = vec;\n            std::array<std::size_t, 3> ar = {3, 2, 4};\n            std::vector<std::size_t> vr = {3, 2, 4};\n            v_copy_a.resize(ar);\n            compare_shape(v_copy_a, rm);\n            v_copy_b.resize(vr);\n            compare_shape(v_copy_b, rm);\n        }\n\n        {\n            SCOPED_TRACE(\"column_major resize\");\n            column_major_result<C> cm;\n            vec.resize(cm.m_shape, layout_type::column_major);\n            compare_shape(vec, cm);\n        }\n\n        {\n            SCOPED_TRACE(\"central_major resize\");\n            central_major_result<C> cem;\n            vec.resize(cem.m_shape, cem.m_strides);\n            compare_shape(vec, cem);\n        }\n\n        {\n            SCOPED_TRACE(\"unit_shape resize\");\n            unit_shape_result<C> usr;\n            vec.resize(usr.m_shape, layout_type::row_major);\n            compare_shape(vec, usr, false);\n            EXPECT_EQ(vec.layout(), layout_type::row_major);\n        }\n    }\n\n    
template <class V, class C = std::vector<std::size_t>>\n    void test_transpose(V& vec)\n    {\n        using shape_type = typename V::shape_type;\n        using strides_type = typename V::strides_type;\n        {\n            SCOPED_TRACE(\"transpose\");\n            shape_type shape_new = xtl::make_sequence<shape_type>(vec.dimension(), 0);\n            std::copy(vec.shape().cbegin(), vec.shape().cend(), shape_new.begin());\n            auto vt = transpose(vec);\n            std::reverse(shape_new.begin(), shape_new.end());\n            EXPECT_EQ(vt.shape(), shape_new);\n            EXPECT_TRUE(std::equal(vt.shape().cbegin(), vt.shape().cend(), shape_new.cbegin()));\n        }\n\n        {\n            SCOPED_TRACE(\"transpose with data\");\n            row_major_result<C> rm;\n            vec.resize(rm.shape(), layout_type::row_major);\n\n            assign_array(vec, rm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));\n\n            auto vec_copy = vec;\n\n            shape_type shape_new(rm.shape());\n            auto vt = transpose(vec);\n            std::reverse(shape_new.begin(), shape_new.end());\n            EXPECT_EQ(vt.shape(), shape_new);\n            EXPECT_TRUE(std::equal(vt.storage().cbegin(), vt.storage().cend(), rm.m_data.cbegin()));\n\n            strides_type new_strides = {rm.m_strides[2],\n                                        rm.m_strides[1],\n                                        rm.m_strides[0]};\n            EXPECT_EQ(vt.strides(), new_strides);\n\n            strides_type new_backstrides = {rm.m_backstrides[2],\n                                            rm.m_backstrides[1],\n                                            rm.m_backstrides[0]};\n            EXPECT_EQ(vt.backstrides(), new_backstrides);\n\n            EXPECT_EQ(vec_copy(0, 0, 0), vt(0, 0, 0));\n            EXPECT_EQ(vec_copy(0, 1, 0), vt(0, 1, 0));\n            EXPECT_EQ(vec_copy(1, 1, 0), vt(0, 1, 1));\n  
          EXPECT_EQ(vec_copy(1, 1, 2), vt(2, 1, 1));\n        }\n\n        {\n            SCOPED_TRACE(\"transpose with permutation\");\n            row_major_result<C> rm;\n            vec.resize(rm.shape(), layout_type::row_major);\n\n            assign_array(vec, rm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));\n\n            auto vec_copy = vec;\n\n            shape_type a = xtl::make_sequence<shape_type>(vec.dimension(), 0);\n            std::copy(vec.shape().cbegin(), vec.shape().cend(), a.begin());\n            auto vt = transpose(vec, {1, 0, 2});\n            shape_type shape_new = {a[1], a[0], a[2]};\n            EXPECT_TRUE(std::equal(vt.shape().cbegin(), vt.shape().cend(), shape_new.begin()));\n            EXPECT_TRUE(std::equal(vt.storage().cbegin(), vt.storage().cend(), rm.m_data.cbegin()));\n\n            strides_type new_strides = {rm.m_strides[1],\n                                        rm.m_strides[0],\n                                        rm.m_strides[2]};\n            EXPECT_EQ(vt.strides(), new_strides);\n\n            // strides_type new_backstrides = {rm.m_backstrides[1],\n            //                                 rm.m_backstrides[0],\n            //                                 rm.m_backstrides[2]};\n            // EXPECT_EQ(vt.backstrides(), new_backstrides);\n\n            EXPECT_EQ(vec_copy(0, 0, 0), vt(0, 0, 0));\n            EXPECT_EQ(vec_copy(0, 1, 0), vt(1, 0, 0));\n            EXPECT_EQ(vec_copy(1, 1, 0), vt(1, 1, 0));\n            EXPECT_EQ(vec_copy(1, 1, 2), vt(1, 1, 2));\n\n            // Compilation check only\n            std::vector<std::size_t> perm = {1, 0, 2};\n            transpose(vec, perm);\n        }\n\n        {\n            SCOPED_TRACE(\"transpose permutation throws\");\n            row_major_result<C> rm;\n            vec.resize(rm.shape(), layout_type::row_major);\n\n            EXPECT_THROW(transpose(vec, {1, 1, 0}, 
check_policy::full()), transpose_error);\n            EXPECT_THROW(transpose(vec, {1, 0, 2, 3}, check_policy::full()), transpose_error);\n            EXPECT_THROW(transpose(vec, {1, 2}, check_policy::full()), transpose_error);\n            EXPECT_THROW(transpose(vec, {3, 0, 1}, check_policy::full()), transpose_error);\n        }\n    }\n\n    template <class V1, class V2>\n    void assign_array(V1& dst, const V2& src)\n    {\n        for (std::size_t i = 0; i < std::size_t(dst.shape()[0]); ++i)\n        {\n            for (std::size_t j = 0; j < std::size_t(dst.shape()[1]); ++j)\n            {\n                for (std::size_t k = 0; k < std::size_t(dst.shape()[2]); ++k)\n                {\n                    dst(i, j, k) = src[i][j][k];\n                }\n            }\n        }\n    }\n\n    template <class V>\n    void test_bound_check(V& vec)\n    {\n#ifdef XTENSOR_ENABLE_ASSERT\n        EXPECT_ANY_THROW(vec(10, 10, 10));\n#else\n        (void)vec;\n#endif\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_access(V& vec)\n    {\n        {\n            SCOPED_TRACE(\"row_major access\");\n            row_major_result<C> rm;\n            vec.resize(rm.m_shape, layout_type::row_major);\n            assign_array(vec, rm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec(1, 1));\n            EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3));\n            test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"column_major access\");\n            column_major_result<C> cm;\n            vec.resize(cm.m_shape, layout_type::column_major);\n            assign_array(vec, cm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), cm.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec(1, 1));\n            EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3));\n            
test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"central_major access\");\n            central_major_result<C> cem;\n            vec.resize(cem.m_shape, cem.m_strides);\n            assign_array(vec, cem.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), cem.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec(1, 1));\n            EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3));\n            test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"unit_shape access\");\n            unit_shape_result<C> usr;\n            vec.resize(usr.m_shape, layout_type::row_major);\n            assign_array(vec, usr.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), usr.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 0), vec(1, 0));\n            EXPECT_EQ(vec(2, 0, 3), vec(2, 2, 2, 0, 3));\n            test_bound_check(vec);\n        }\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_element(V& vec)\n    {\n        {\n            SCOPED_TRACE(\"row_major access\");\n            row_major_result<C> rm;\n            vec.resize(rm.m_shape, layout_type::row_major);\n            assign_array(vec, rm.m_assigner);\n            EXPECT_EQ(vec.data(), rm.m_data);\n            std::vector<std::size_t> index1 = {0, 1, 1};\n            std::vector<std::size_t> index2 = {1, 1};\n            std::vector<std::size_t> index3 = {2, 1, 3};\n            std::vector<std::size_t> index4 = {2, 2, 2, 1, 3};\n            EXPECT_EQ(vec.element(index1.begin(), index1.end()), vec.element(index2.begin(), index2.end()));\n            EXPECT_EQ(vec.element(index3.begin(), index3.end()), vec.element(index4.begin(), index4.end()));\n            test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"column_major access\");\n            column_major_result<C> cm;\n            vec.resize(cm.m_shape, 
layout_type::column_major);\n            assign_array(vec, cm.m_assigner);\n            EXPECT_EQ(vec.data(), cm.m_data);\n            std::vector<std::size_t> index1 = {0, 1, 1};\n            std::vector<std::size_t> index2 = {1, 1};\n            std::vector<std::size_t> index3 = {2, 1, 3};\n            std::vector<std::size_t> index4 = {2, 2, 2, 1, 3};\n            EXPECT_EQ(vec.element(index1.begin(), index1.end()), vec.element(index2.begin(), index2.end()));\n            EXPECT_EQ(vec.element(index3.begin(), index3.end()), vec.element(index4.begin(), index4.end()));\n            test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"central_major access\");\n            central_major_result<C> cem;\n            vec.resize(cem.m_shape, cem.m_strides);\n            assign_array(vec, cem.m_assigner);\n            EXPECT_EQ(vec.data(), cem.m_data);\n            std::vector<std::size_t> index1 = {0, 1, 1};\n            std::vector<std::size_t> index2 = {1, 1};\n            std::vector<std::size_t> index3 = {2, 1, 3};\n            std::vector<std::size_t> index4 = {2, 2, 2, 1, 3};\n            EXPECT_EQ(vec.element(index1.begin(), index1.end()), vec.element(index2.begin(), index2.end()));\n            EXPECT_EQ(vec.element(index3.begin(), index3.end()), vec.element(index4.begin(), index4.end()));\n            test_bound_check(vec);\n        }\n\n        {\n            SCOPED_TRACE(\"unit_shape access\");\n            unit_shape_result<C> usr;\n            vec.resize(usr.m_shape, layout_type::row_major);\n            assign_array(vec, usr.m_assigner);\n            EXPECT_EQ(vec.data(), usr.m_data);\n            std::vector<std::size_t> index1 = {0, 1, 0};\n            std::vector<std::size_t> index2 = {1, 0};\n            std::vector<std::size_t> index3 = {2, 0, 3};\n            std::vector<std::size_t> index4 = {2, 2, 2, 0, 3};\n            EXPECT_EQ(vec.element(index1.begin(), index1.end()), vec.element(index2.begin(), index2.end()));\n           
 EXPECT_EQ(vec.element(index3.begin(), index3.end()), vec.element(index4.begin(), index4.end()));\n            test_bound_check(vec);\n        }\n    }\n\n    template <class V1, class V2>\n    void indexed_assign_array(V1& dst, const V2& src)\n    {\n        xindex index(dst.dimension());\n        for (std::size_t i = 0; i < std::size_t(dst.shape()[0]); ++i)\n        {\n            index[0] = i;\n            for (std::size_t j = 0; j < std::size_t(dst.shape()[1]); ++j)\n            {\n                index[1] = j;\n                for (std::size_t k = 0; k < std::size_t(dst.shape()[2]); ++k)\n                {\n                    index[2] = k;\n                    dst[index] = src[i][j][k];\n                }\n            }\n        }\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_indexed_access(V& vec)\n    {\n        xindex index1 = {1, 1};\n        xindex index2 = {2, 2, 2, 1, 3};\n        {\n            SCOPED_TRACE(\"row_major access\");\n            row_major_result<C> rm;\n            vec.resize(rm.m_shape, layout_type::row_major);\n            indexed_assign_array(vec, rm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec[index1]);\n            EXPECT_EQ(vec(2, 1, 3), vec[index2]);\n        }\n\n        {\n            SCOPED_TRACE(\"column_major access\");\n            column_major_result<C> cm;\n            vec.resize(cm.m_shape, layout_type::column_major);\n            indexed_assign_array(vec, cm.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), cm.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec[index1]);\n            EXPECT_EQ(vec(2, 1, 3), vec[index2]);\n        }\n\n        {\n            SCOPED_TRACE(\"central_major access\");\n            central_major_result<C> cem;\n            vec.resize(cem.m_shape, cem.m_strides);\n            
indexed_assign_array(vec, cem.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), cem.m_data.cbegin()));\n            EXPECT_EQ(vec(0, 1, 1), vec[index1]);\n            EXPECT_EQ(vec(2, 1, 3), vec[index2]);\n        }\n\n        {\n            SCOPED_TRACE(\"unit_shape access\");\n            unit_shape_result<C> usr;\n            vec.resize(usr.m_shape, layout_type::row_major);\n            indexed_assign_array(vec, usr.m_assigner);\n            EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), usr.m_data.cbegin()));\n            xindex id1 = {1, 0};\n            xindex id2 = {2, 2, 2, 0, 3};\n            EXPECT_EQ(vec(0, 1, 0), vec[id1]);\n            EXPECT_EQ(vec(2, 0, 3), vec[id2]);\n        }\n    }\n\n    template <class V>\n    void test_broadcast(V& vec)\n    {\n        using shape_type = typename V::shape_type;\n\n        shape_type s = {3, 1, 4, 2};\n        vec.resize(s);\n\n        {\n            SCOPED_TRACE(\"same shape\");\n            shape_type s1 = s;\n            bool res = vec.broadcast_shape(s1);\n            EXPECT_EQ(s1, s);\n            EXPECT_TRUE(res);\n        }\n\n        {\n            SCOPED_TRACE(\"different shape\");\n            shape_type s2 = {3, 5, 1, 2};\n            shape_type s2r = {3, 5, 4, 2};\n            bool res = vec.broadcast_shape(s2);\n            EXPECT_EQ(s2, s2r);\n            EXPECT_FALSE(res);\n        }\n\n        {\n            SCOPED_TRACE(\"incompatible shapes\");\n            shape_type s4 = {2, 1, 3, 2};\n            bool wit = false;\n            try\n            {\n                vec.broadcast_shape(s4);\n            }\n            catch (broadcast_error&)\n            {\n                wit = true;\n            }\n            EXPECT_TRUE(wit);\n        }\n    }\n\n    template <class V>\n    void test_broadcast2(V& vec)\n    {\n        using shape_type = typename V::shape_type;\n\n        shape_type s = {3, 1, 4, 2};\n        
vec.resize(s);\n\n        {\n            SCOPED_TRACE(\"different dimensions\");\n            shape_type s3 = {5, 3, 1, 4, 2};\n            shape_type s3r = s3;\n            bool res = vec.broadcast_shape(s3);\n            EXPECT_EQ(s3, s3r);\n            EXPECT_FALSE(res);\n        }\n    }\n\n    template <class VRM, class VCM, class C = std::vector<std::size_t>>\n    void test_iterator(VRM& vecrm, VCM& veccm)\n    {\n        {\n            SCOPED_TRACE(\"row_major storage iterator\");\n            row_major_result<C> rm;\n            vecrm.resize(rm.m_shape, layout_type::row_major);\n            std::copy(rm.data().cbegin(), rm.data().cend(), vecrm.template begin<layout_type::row_major>());\n            EXPECT_TRUE(std::equal(rm.data().cbegin(), rm.data().cend(), vecrm.storage().cbegin()));\n            //EXPECT_EQ(vecrm.template end<layout_type::row_major>(), vecrm.data().end());\n        }\n\n        {\n            SCOPED_TRACE(\"column_major storage iterator\");\n            column_major_result<C> cm;\n            veccm.resize(cm.m_shape, layout_type::column_major);\n            std::copy(cm.data().cbegin(), cm.data().cend(), veccm.template begin<layout_type::column_major>());\n            EXPECT_TRUE(std::equal(cm.data().cbegin(), cm.data().cend(), veccm.storage().cbegin()));\n            //EXPECT_EQ(veccm.template end<layout_type::column_major>(), veccm.data().end());\n        }\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_xiterator(V& vec)\n    {\n        row_major_result<C> rm;\n        vec.resize(rm.m_shape, layout_type::row_major);\n        indexed_assign_array(vec, rm.m_assigner);\n        size_t nb_iter = vec.size() / 2;\n        using shape_type = std::vector<size_t>;\n\n        // broadcast_iterator\n        {\n            auto iter = vec.template begin<layout_type::row_major>();\n            auto iter_end = vec.template end<layout_type::row_major>();\n            for (size_t i = 0; i < nb_iter; ++i)\n          
  {\n                ++iter;\n            }\n            EXPECT_EQ(vec.data()[nb_iter], *iter);\n            for (size_t i = 0; i < nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n\n        // shaped_xiterator\n        {\n            shape_type shape(rm.m_shape.size() + 1);\n            std::copy(rm.m_shape.begin(), rm.m_shape.end(), shape.begin() + 1);\n            shape[0] = 2;\n            auto iter = vec.template begin<shape_type, layout_type::row_major>(shape);\n            auto iter_end = vec.template end<shape_type, layout_type::row_major>(shape);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(vec.data()[0], *iter);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n\n        // column broadcast_iterator\n        {\n            auto iter = vec.template begin<layout_type::column_major>();\n            auto iter_end = vec.template end<layout_type::column_major>();\n            for (size_t i = 0; i < nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(vec(0, 0, 2), *iter);\n            for (size_t i = 0; i < nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n\n        // column shaped_xiterator\n        {\n            shape_type shape(rm.m_shape.size() + 1);\n            std::copy(rm.m_shape.begin(), rm.m_shape.end(), shape.begin() + 1);\n            shape[0] = 2;\n            auto iter = vec.template begin<shape_type, layout_type::column_major>(shape);\n            auto iter_end = vec.template end<shape_type, layout_type::column_major>(shape);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(vec(0, 0, 2), 
*iter);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n    }\n\n    template <class V, class C = std::vector<std::size_t>>\n    void test_reverse_xiterator(V& vec)\n    {\n        row_major_result<C> rm;\n        vec.resize(rm.m_shape, layout_type::row_major);\n        indexed_assign_array(vec, rm.m_assigner);\n        size_t nb_iter = vec.size() / 2;\n\n        // broadcast_iterator\n        {\n            auto iter = vec.template rbegin<layout_type::row_major>();\n            auto iter_end = vec.template rend<layout_type::row_major>();\n            for (size_t i = 0; i < nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(vec.data()[nb_iter - 1], *iter);\n            for (size_t i = 0; i < nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n\n        // shaped_xiterator\n        {\n            using shape_type = std::vector<size_t>;\n            shape_type shape(rm.m_shape.size() + 1);\n            std::copy(rm.m_shape.begin(), rm.m_shape.end(), shape.begin() + 1);\n            shape[0] = 2;\n            auto iter = vec.template rbegin<shape_type, layout_type::row_major>(shape);\n            auto iter_end = vec.template rend<shape_type, layout_type::row_major>(shape);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(vec.data()[2 * nb_iter - 1], *iter);\n            for (size_t i = 0; i < 2 * nb_iter; ++i)\n            {\n                ++iter;\n            }\n            EXPECT_EQ(iter, iter_end);\n        }\n    }\n}\n\n#endif\n"
  },
  {
    "path": "test/test_pyarray.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include \"gtest/gtest.h\"\n\n#include \"xtensor-python/pyarray.hpp\"\n\n#include \"xtensor/containers/xarray.hpp\"\n#include \"xtensor/views/xview.hpp\"\n\n#include \"test_common.hpp\"\n\nnamespace xt\n{\n    using container_type = std::vector<npy_intp>;\n\n    template <class T>\n    using ndarray = pyarray<T, xt::layout_type::row_major>;\n\n    void test1 (ndarray<int>const& x)\n    {\n        ndarray<int> y = x;\n        ndarray<int> z = xt::zeros<int>({10});\n    }\n\n    double compute(ndarray<double> const& xs)\n    {\n        auto v = xt::view (xs, 0, xt::all());\n        return v(0);\n    }\n\n    TEST(pyarray, initializer_constructor)\n    {\n        pyarray<int> r\n          {{{ 0,  1,  2},\n            { 3,  4,  5},\n            { 6,  7,  8}},\n           {{ 9, 10, 11},\n            {12, 13, 14},\n            {15, 16, 17}}};\n\n        EXPECT_EQ(r.layout(), xt::layout_type::row_major);\n        EXPECT_EQ(r.dimension(), 3);\n        EXPECT_EQ(r(0, 0, 1), 1);\n        EXPECT_EQ(r.shape()[0], 2);\n\n        pyarray<int, xt::layout_type::column_major> c\n          {{{ 0,  1,  2},\n            { 3,  4,  5},\n            { 6,  7,  8}},\n           {{ 9, 10, 11},\n            {12, 13, 14},\n            {15, 16, 17}}};\n\n        EXPECT_EQ(c.layout(), xt::layout_type::column_major);\n        EXPECT_EQ(c.dimension(), 3);\n        
EXPECT_EQ(c(0, 0, 1), 1);\n        EXPECT_EQ(c.shape()[0], 2);\n\n        pyarray<int, xt::layout_type::dynamic> d\n          {{{ 0,  1,  2},\n            { 3,  4,  5},\n            { 6,  7,  8}},\n           {{ 9, 10, 11},\n            {12, 13, 14},\n            {15, 16, 17}}};\n\n        EXPECT_EQ(d.layout(), xt::layout_type::row_major);\n        EXPECT_EQ(d.dimension(), 3);\n        EXPECT_EQ(d(0, 0, 1), 1);\n        EXPECT_EQ(d.shape()[0], 2);\n    }\n\n    TEST(pyarray, expression)\n    {\n        pyarray<int> a = xt::empty<int>({});\n\n        EXPECT_EQ(a.layout(), xt::layout_type::row_major);\n        EXPECT_EQ(a.dimension(), 0);\n        EXPECT_EQ(a.size(), 1);\n\n        pyarray<int> b = xt::empty<int>({5});\n\n        EXPECT_EQ(b.layout(), xt::layout_type::row_major);\n        EXPECT_EQ(b.dimension(), 1);\n        EXPECT_EQ(b.size(), 5);\n\n        pyarray<int> c = xt::empty<int>({5, 3});\n\n        EXPECT_EQ(c.layout(), xt::layout_type::row_major);\n        EXPECT_EQ(c.dimension(), 2);\n        EXPECT_EQ(c.size(), 15);\n        EXPECT_EQ(c.shape(0), 5);\n        EXPECT_EQ(c.shape(1), 3);\n    }\n\n    TEST(pyarray, shaped_constructor)\n    {\n        {\n            SCOPED_TRACE(\"row_major constructor\");\n            row_major_result<> rm;\n            pyarray<int> ra(rm.m_shape);\n            compare_shape(ra, rm);\n            EXPECT_EQ(layout_type::row_major, ra.layout());\n        }\n\n        {\n            SCOPED_TRACE(\"column_major constructor\");\n            column_major_result<> cm;\n            pyarray<int> ca(cm.m_shape, layout_type::column_major);\n            compare_shape(ca, cm);\n            EXPECT_EQ(layout_type::column_major, ca.layout());\n        }\n    }\n\n    TEST(pyarray, from_shape)\n    {\n        auto arr = pyarray<double>::from_shape({5, 2, 6});\n        auto exp_shape = std::vector<std::size_t>{5, 2, 6};\n        EXPECT_TRUE(std::equal(arr.shape().begin(), arr.shape().end(), exp_shape.begin()));\n        
EXPECT_EQ(arr.shape().size(), 3);\n        EXPECT_EQ(arr.size(), 5 * 2 * 6);\n    }\n\n    TEST(pyarray, strided_constructor)\n    {\n        central_major_result<> cmr;\n        pyarray<int> cma(cmr.m_shape, cmr.m_strides);\n        compare_shape(cma, cmr);\n    }\n\n    TEST(pyarray, valued_constructor)\n    {\n        {\n            SCOPED_TRACE(\"row_major valued constructor\");\n            row_major_result<> rm;\n            int value = 2;\n            pyarray<int> ra(rm.m_shape, value);\n            compare_shape(ra, rm);\n            std::vector<int> vec(ra.size(), value);\n            EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ra.storage().cbegin()));\n        }\n\n        {\n            SCOPED_TRACE(\"column_major valued constructor\");\n            column_major_result<> cm;\n            int value = 2;\n            pyarray<int> ca(cm.m_shape, value, layout_type::column_major);\n            compare_shape(ca, cm);\n            std::vector<int> vec(ca.size(), value);\n            EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ca.storage().cbegin()));\n        }\n    }\n\n    TEST(pyarray, strided_valued_constructor)\n    {\n        central_major_result<> cmr;\n        int value = 2;\n        pyarray<int> cma(cmr.m_shape, cmr.m_strides, value);\n        compare_shape(cma, cmr);\n        std::vector<int> vec(cma.size(), value);\n        EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), cma.storage().cbegin()));\n    }\n\n    TEST(pyarray, copy_semantic)\n    {\n        central_major_result<> res;\n        int value = 2;\n        pyarray<int> a(res.m_shape, res.m_strides, value);\n\n        {\n            SCOPED_TRACE(\"copy constructor\");\n            pyarray<int> b(a);\n            compare_shape(a, b);\n            EXPECT_EQ(a.storage(), b.storage());\n            a.data()[0] += 1;\n            EXPECT_NE(a.storage()[0], b.storage()[0]);\n        }\n\n        {\n            SCOPED_TRACE(\"assignment operator\");\n            row_major_result<> r;\n     
       pyarray<int> c(r.m_shape, 0);\n            EXPECT_NE(a.storage(), c.storage());\n            c = a;\n            compare_shape(a, c);\n            EXPECT_EQ(a.storage(), c.storage());\n            a.data()[0] += 1;\n            EXPECT_NE(a.storage()[0], c.storage()[0]);\n        }\n    }\n\n    TEST(pyarray, move_semantic)\n    {\n        central_major_result<> res;\n        int value = 2;\n        pyarray<int> a(res.m_shape, res.m_strides, value);\n\n        {\n            SCOPED_TRACE(\"move constructor\");\n            pyarray<int> tmp(a);\n            pyarray<int> b(std::move(tmp));\n            compare_shape(a, b);\n            EXPECT_EQ(a.storage(), b.storage());\n        }\n\n        {\n            SCOPED_TRACE(\"move assignment\");\n            row_major_result<> r;\n            pyarray<int> c(r.m_shape, 0);\n            EXPECT_NE(a.storage(), c.storage());\n            pyarray<int> tmp(a);\n            c = std::move(tmp);\n            compare_shape(a, c);\n            EXPECT_EQ(a.storage(), c.storage());\n        }\n    }\n\n    TEST(pyarray, extended_constructor)\n    {\n        xt::xarray<int> a1 = { { 1, 2 },{ 3, 4 } };\n        xt::xarray<int> a2 = { { 1, 2 },{ 3, 4 } };\n        pyarray<int> c = a1 + a2;\n        EXPECT_EQ(c(0, 0), a1(0, 0) + a2(0, 0));\n        EXPECT_EQ(c(0, 1), a1(0, 1) + a2(0, 1));\n        EXPECT_EQ(c(1, 0), a1(1, 0) + a2(1, 0));\n        EXPECT_EQ(c(1, 1), a1(1, 1) + a2(1, 1));\n\n        pyarray<int, xt::layout_type::row_major> d = a1 + a2;\n        EXPECT_EQ(d(0, 0), a1(0, 0) + a2(0, 0));\n        EXPECT_EQ(d(0, 1), a1(0, 1) + a2(0, 1));\n        EXPECT_EQ(d(1, 0), a1(1, 0) + a2(1, 0));\n        EXPECT_EQ(d(1, 1), a1(1, 1) + a2(1, 1));\n\n        pyarray<int, xt::layout_type::column_major> e = a1 + a2;\n        EXPECT_EQ(e(0, 0), a1(0, 0) + a2(0, 0));\n        EXPECT_EQ(e(0, 1), a1(0, 1) + a2(0, 1));\n        EXPECT_EQ(e(1, 0), a1(1, 0) + a2(1, 0));\n        EXPECT_EQ(e(1, 1), a1(1, 1) + a2(1, 1));\n    }\n\n    
TEST(pyarray, resize)\n    {\n        pyarray<int> a;\n        test_resize(a);\n\n        pyarray<int> b = { {1, 2}, {3, 4} };\n        a.resize(b.shape());\n        EXPECT_EQ(a.shape(), b.shape());\n    }\n\n    TEST(pyarray, transpose)\n    {\n        pyarray<int> a;\n        test_transpose(a);\n    }\n\n    TEST(pyarray, access)\n    {\n        pyarray<int> a;\n        test_access(a);\n    }\n\n    TEST(pyarray, indexed_access)\n    {\n        pyarray<int> a;\n        test_indexed_access(a);\n    }\n\n    TEST(pyarray, broadcast_shape)\n    {\n        pyarray<int> a;\n        test_broadcast(a);\n        test_broadcast2(a);\n    }\n\n    TEST(pyarray, iterator)\n    {\n        pyarray<int> a;\n        pyarray<int> b;\n        test_iterator(a, b);\n\n        pyarray<int, layout_type::row_major> c;\n        bool truthy = std::is_same<decltype(c.begin()), int*>::value;\n        EXPECT_TRUE(truthy);\n    }\n\n    TEST(pyarray, initializer_list)\n    {\n        pyarray<int> a0(1);\n        pyarray<int> a1({1, 2});\n        pyarray<int> a2({{1, 2}, {2, 4}, {5, 6}});\n        EXPECT_EQ(1, a0());\n        EXPECT_EQ(2, a1(1));\n        EXPECT_EQ(4, a2(1, 1));\n    }\n\n    TEST(pyarray, zerod)\n    {\n        pyarray<int> a;\n        EXPECT_EQ(0, a());\n    }\n\n    TEST(pyarray, reshape)\n    {\n        pyarray<int> a = {{1,2,3}, {4,5,6}};\n        auto ptr = a.data();\n        a.reshape({1, 6});\n        std::vector<std::size_t> sc1({1, 6});\n        EXPECT_TRUE(std::equal(sc1.begin(), sc1.end(), a.shape().begin()) && a.shape().size() == 2);\n        EXPECT_EQ(ptr, a.data());\n        a.reshape({6});\n        std::vector<std::size_t> sc2 = {6};\n        EXPECT_TRUE(std::equal(sc2.begin(), sc2.end(), a.shape().begin()) && a.shape().size() == 1);\n        EXPECT_EQ(ptr, a.data());\n    }\n\n    TEST(pyarray, view)\n    {\n        xt::pyarray<int> arr = xt::zeros<int>({ 10 });\n        auto v = xt::view(arr, xt::all());\n        EXPECT_EQ(v(0), 0.);\n    }\n\n    
TEST(pyarray, zerod_copy)\n    {\n        xt::pyarray<int> arr = 2;\n        xt::pyarray<int> arr2(arr);\n        EXPECT_EQ(arr(), arr2());\n    }\n}\n"
  },
  {
    "path": "test/test_pyarray_traits.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include \"gtest/gtest.h\"\n\n#include \"xtensor-python/pyarray.hpp\"\n\n\n\nnamespace xt\n{\n    namespace testing\n    {\n        class pyarray_traits: public ::testing::Test\n        {\n        protected:\n        \n            using dynamic_type = xt::pyarray<double>;\n            using row_major_type = xt::pyarray<double, xt::layout_type::row_major>;\n            using column_major_type = xt::pyarray<double, xt::layout_type::column_major>;\n\n            dynamic_type d1 = {{0., 1.}, {0., 10.}, {0., 100.}};\n            dynamic_type d2 = {{0., 2.}, {0., 20.}, {0., 200.}};\n\n            row_major_type r1 = {{0., 1.}, {0., 10.}, {0., 100.}};\n            row_major_type r2 = {{0., 2.}, {0., 20.}, {0., 200.}};\n\n            column_major_type c1 = {{0., 1.}, {0., 10.}, {0., 100.}};\n            column_major_type c2 = {{0., 2.}, {0., 20.}, {0., 200.}};\n\n            template <class T>\n            bool test_has_strides(T const&)\n            {\n                return xt::has_strides<T>::value;\n            }\n\n            template <class T>\n            xt::layout_type test_result_layout(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                return (tmp1 + tmp2).layout();\n            }\n\n            template <class T>\n            
bool test_linear_assign(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                T res = tmp1 + tmp2;\n                return xt::xassign_traits<T, decltype(tmp1 + tmp2)>::linear_assign(res, tmp1 + tmp2, true);\n            }\n\n            template <class T>\n            bool test_static_simd_linear_assign(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                return xt::xassign_traits<T, decltype(tmp2)>::simd_linear_assign();\n            }\n\n            template <class T>\n            bool test_dynamic_simd_linear_assign(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                return xt::xassign_traits<T, decltype(tmp2)>::simd_linear_assign(a1, tmp2);\n            }\n\n            template <class T>\n            bool test_linear_static_layout(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                return xt::detail::linear_static_layout<decltype(tmp1), decltype(tmp2)>();\n            }\n\n            template <class T>\n            bool test_contiguous_layout(T const& a1, T const& a2)\n            {\n                auto tmp1 = pow(sin((a2 - a1) / 2.), 2.);\n                auto tmp2 = cos(a1);\n                return decltype(tmp1)::contiguous_layout && decltype(tmp2)::contiguous_layout;\n            }\n        };\n\n        TEST_F(pyarray_traits, result_layout)\n        {\n            EXPECT_TRUE(d1.layout() == layout_type::row_major);\n            EXPECT_TRUE(test_result_layout(d1, d2) == layout_type::row_major);\n\n            EXPECT_TRUE(r1.layout() == layout_type::row_major);\n            EXPECT_TRUE(test_result_layout(r1, r2) == layout_type::row_major);\n\n           
 EXPECT_TRUE(c1.layout() == layout_type::column_major);\n            EXPECT_TRUE(test_result_layout(c1, c2) == layout_type::column_major);\n        }\n\n        TEST_F(pyarray_traits, has_strides)\n        {\n            EXPECT_TRUE(test_has_strides(d1));\n            EXPECT_TRUE(test_has_strides(r1));\n            EXPECT_TRUE(test_has_strides(c1));\n        }\n\n        TEST_F(pyarray_traits, has_linear_assign)\n        {\n            EXPECT_TRUE(d2.has_linear_assign(d1.strides()));\n            EXPECT_TRUE(r2.has_linear_assign(r1.strides()));\n            EXPECT_TRUE(c2.has_linear_assign(c1.strides()));\n        }\n\n        TEST_F(pyarray_traits, linear_assign)\n        {\n            EXPECT_TRUE(test_linear_assign(d1, d2));\n            EXPECT_TRUE(test_linear_assign(r1, r2));\n            EXPECT_TRUE(test_linear_assign(c1, c2));\n        }\n\n        TEST_F(pyarray_traits, static_simd_linear_assign)\n        {\n#ifdef XTENSOR_USE_XSIMD\n            EXPECT_FALSE(test_static_simd_linear_assign(d1, d2));\n            EXPECT_TRUE(test_static_simd_linear_assign(r1, r2));\n            EXPECT_TRUE(test_static_simd_linear_assign(c1, c2));\n#else\n            EXPECT_FALSE(test_static_simd_linear_assign(d1, d2));\n            EXPECT_FALSE(test_static_simd_linear_assign(r1, r2));\n            EXPECT_FALSE(test_static_simd_linear_assign(c1, c2));\n#endif\n        }\n\n        TEST_F(pyarray_traits, dynamic_simd_linear_assign)\n        {\n#ifdef XTENSOR_USE_XSIMD\n            EXPECT_TRUE(test_dynamic_simd_linear_assign(d1, d2));\n            EXPECT_TRUE(test_dynamic_simd_linear_assign(r1, r2));\n            EXPECT_TRUE(test_dynamic_simd_linear_assign(c1, c2));\n#else\n            EXPECT_FALSE(test_dynamic_simd_linear_assign(d1, d2));\n            EXPECT_FALSE(test_dynamic_simd_linear_assign(r1, r2));\n            EXPECT_FALSE(test_dynamic_simd_linear_assign(c1, c2));\n#endif\n        }\n\n        TEST_F(pyarray_traits, linear_static_layout)\n        {\n            
EXPECT_FALSE(test_linear_static_layout(d1, d2));\n            EXPECT_TRUE(test_linear_static_layout(r1, r2));\n            EXPECT_TRUE(test_linear_static_layout(c1, c2));\n        }\n\n        TEST_F(pyarray_traits, contiguous_layout)\n        {\n            EXPECT_FALSE(test_contiguous_layout(d1, d2));\n            EXPECT_TRUE(test_contiguous_layout(r1, r2));\n            EXPECT_TRUE(test_contiguous_layout(c1, c2));\n        }\n    }\n}\n"
  },
  {
    "path": "test/test_pytensor.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include \"gtest/gtest.h\"\n\n#include \"xtensor-python/pytensor.hpp\"\n\n#include \"xtensor/containers/xtensor.hpp\"\n#include \"xtensor/views/xview.hpp\"\n\n#include \"test_common.hpp\"\n\nnamespace xt\n{\n    using container_type = std::array<npy_intp, 3>;\n\n    TEST(pytensor, initializer_constructor)\n    {\n        pytensor<int, 3> t\n          {{{ 0,  1,  2},\n            { 3,  4,  5},\n            { 6,  7,  8}},\n           {{ 9, 10, 11},\n            {12, 13, 14},\n            {15, 16, 17}}};\n        EXPECT_EQ(t.dimension(), 3);\n        EXPECT_EQ(t(0, 0, 1), 1);\n        EXPECT_EQ(t.shape()[0], 2);\n    }\n\n    TEST(pytensor, shaped_constructor)\n    {\n        {\n            SCOPED_TRACE(\"row_major constructor\");\n            row_major_result<container_type> rm;\n            pytensor<int, 3> ra(rm.m_shape);\n            compare_shape(ra, rm);\n            EXPECT_EQ(layout_type::row_major, ra.layout());\n        }\n\n        {\n            SCOPED_TRACE(\"column_major constructor\");\n            column_major_result<container_type> cm;\n            pytensor<int, 3> ca(cm.m_shape, layout_type::column_major);\n            compare_shape(ca, cm);\n            EXPECT_EQ(layout_type::column_major, ca.layout());\n        }\n    }\n\n    TEST(pytensor, from_shape)\n    {\n        auto arr = pytensor<double, 3>::from_shape({5, 2, 
6});\n        auto exp_shape = std::vector<std::size_t>{5, 2, 6};\n        EXPECT_TRUE(std::equal(arr.shape().begin(), arr.shape().end(), exp_shape.begin()));\n        EXPECT_EQ(arr.shape().size(), 3);\n        EXPECT_EQ(arr.size(), 5 * 2 * 6);\n        using pyt3 = pytensor<double, 3>;\n        std::vector<std::size_t> shp = std::vector<std::size_t>{5, 2};\n        EXPECT_THROW(pyt3::from_shape(shp), std::runtime_error);\n    }\n\n    TEST(pytensor, scalar_from_shape)\n    {\n        std::array<size_t, 0> shape;\n        auto a = pytensor<double, 0>::from_shape(shape);\n        pytensor<double, 0> b(1.2);\n        EXPECT_TRUE(a.size() == b.size());\n        EXPECT_TRUE(xt::has_shape(a, b.shape()));\n    }\n\n    TEST(pytensor, strided_constructor)\n    {\n        central_major_result<container_type> cmr;\n        pytensor<int, 3> cma(cmr.m_shape, cmr.m_strides);\n        compare_shape(cma, cmr);\n    }\n\n    TEST(pytensor, valued_constructor)\n    {\n        {\n            SCOPED_TRACE(\"row_major valued constructor\");\n            row_major_result<container_type> rm;\n            int value = 2;\n            pytensor<int, 3> ra(rm.m_shape, value);\n            compare_shape(ra, rm);\n            std::vector<int> vec(ra.size(), value);\n            EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ra.storage().cbegin()));\n        }\n\n        {\n            SCOPED_TRACE(\"column_major valued constructor\");\n            column_major_result<container_type> cm;\n            int value = 2;\n            pytensor<int, 3> ca(cm.m_shape, value, layout_type::column_major);\n            compare_shape(ca, cm);\n            std::vector<int> vec(ca.size(), value);\n            EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ca.storage().cbegin()));\n        }\n    }\n\n    TEST(pytensor, strided_valued_constructor)\n    {\n        central_major_result<container_type> cmr;\n        int value = 2;\n        pytensor<int, 3> cma(cmr.m_shape, cmr.m_strides, value);\n        
compare_shape(cma, cmr);\n        std::vector<int> vec(cma.size(), value);\n        EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), cma.storage().cbegin()));\n    }\n\n    TEST(pytensor, copy_semantic)\n    {\n        central_major_result<container_type> res;\n        int value = 2;\n        pytensor<int, 3> a(res.m_shape, res.m_strides, value);\n\n        {\n            SCOPED_TRACE(\"copy constructor\");\n            pytensor<int, 3> b(a);\n            compare_shape(a, b);\n            EXPECT_EQ(a.storage(), b.storage());\n            a.data()[0] += 1;\n            EXPECT_NE(a.storage()[0], b.storage()[0]);\n        }\n\n        {\n            SCOPED_TRACE(\"assignment operator\");\n            row_major_result<container_type> r;\n            pytensor<int, 3> c(r.m_shape, 0);\n            EXPECT_NE(a.data(), c.data());\n            c = a;\n            compare_shape(a, c);\n            EXPECT_EQ(a.storage(), c.storage());\n            a.data()[0] += 1;\n            EXPECT_NE(a.storage()[0], c.storage()[0]);\n        }\n    }\n\n    TEST(pytensor, move_semantic)\n    {\n        central_major_result<container_type> res;\n        int value = 2;\n        pytensor<int, 3> a(res.m_shape, res.m_strides, value);\n\n        {\n            SCOPED_TRACE(\"move constructor\");\n            pytensor<int, 3> tmp(a);\n            pytensor<int, 3> b(std::move(tmp));\n            compare_shape(a, b);\n            EXPECT_EQ(a.storage(), b.storage());\n        }\n\n        {\n            SCOPED_TRACE(\"move assignment\");\n            row_major_result<container_type> r;\n            pytensor<int, 3> c(r.m_shape, 0);\n            EXPECT_NE(a.storage(), c.storage());\n            pytensor<int, 3> tmp(a);\n            c = std::move(tmp);\n            compare_shape(a, c);\n            EXPECT_EQ(a.storage(), c.storage());\n        }\n    }\n\n    TEST(pytensor, extended_constructor)\n    {\n        xt::xtensor<int, 2> a1 = { {1, 2}, {3, 4} };\n        xt::xtensor<int, 2> a2 = { {1, 2}, 
{3, 4} };\n        pytensor<int, 2> c = a1 + a2;\n        EXPECT_EQ(c(0, 0), a1(0, 0) + a2(0, 0));\n        EXPECT_EQ(c(0, 1), a1(0, 1) + a2(0, 1));\n        EXPECT_EQ(c(1, 0), a1(1, 0) + a2(1, 0));\n        EXPECT_EQ(c(1, 1), a1(1, 1) + a2(1, 1));\n    }\n\n    TEST(pytensor, resize)\n    {\n        pytensor<int, 3> a;\n        test_resize<pytensor<int, 3>, container_type>(a);\n\n        pytensor<int, 3> b = { { { 1, 2 },{ 3, 4 } } };\n        a.resize(b.shape());\n        EXPECT_EQ(a.shape(), b.shape());\n    }\n\n    TEST(pytensor, transpose)\n    {\n        pytensor<int, 3> a;\n        test_transpose<pytensor<int, 3>, container_type>(a);\n    }\n\n    TEST(pytensor, access)\n    {\n        pytensor<int, 3> a;\n        test_access<pytensor<int, 3>, container_type>(a);\n    }\n\n    TEST(pytensor, indexed_access)\n    {\n        pytensor<int, 3> a;\n        test_indexed_access<pytensor<int, 3>, container_type>(a);\n    }\n\n    TEST(pytensor, broadcast_shape)\n    {\n        pytensor<int, 4> a;\n        test_broadcast(a);\n    }\n\n    TEST(pytensor, iterator)\n    {\n        pytensor<int, 3> a;\n        pytensor<int, 3> b;\n        test_iterator<pytensor<int, 3>, pytensor<int, 3>, container_type>(a, b);\n\n        pytensor<int, 3, layout_type::row_major> c;\n        bool truthy = std::is_same<decltype(c.begin()), int*>::value;\n        EXPECT_TRUE(truthy);\n    }\n\n    TEST(pytensor, zerod)\n    {\n        pytensor<int, 3> a;\n        EXPECT_EQ(0, a());\n    }\n\n    TEST(pytensor, reshape)\n    {\n        pytensor<int, 2> a = {{1,2,3}, {4,5,6}};\n        auto ptr = a.data();\n        a.reshape(a.shape()); // compilation check\n        a.reshape({1, 6});\n        EXPECT_EQ(ptr, a.data());\n        EXPECT_THROW(a.reshape(std::vector<std::size_t>{6}), std::runtime_error);\n        // note this throws because std array has only 1 element initialized\n        // and the second element is `0`.\n        EXPECT_THROW(a.reshape({6, 5}), std::runtime_error);\n    }\n\n  
  TEST(pytensor, view)\n    {\n        xt::pytensor<int, 1> arr = xt::zeros<int>({ 10 });\n        auto v = xt::view(arr, xt::all());\n        EXPECT_EQ(v(0), 0.);\n    }\n\n    TEST(pytensor, unary)\n    {\n        pytensor<int, 1> a = { 1, 2, 3 };\n        pytensor<int, 1> res = -a;\n        pytensor<int, 1> ref = { -1, -2, -3 };\n        EXPECT_EQ(ref(0), res(0));\n        EXPECT_EQ(ref(1), res(1));\n        EXPECT_EQ(ref(2), res(2));\n    }\n\n    TEST(pytensor, inplace_pybind11_overload)\n    {\n        // pybind11 overrides a number of operators in pybind11::object.\n        // This is testing that the right overload is picked up.\n        pytensor<double, 1> a = { 1.0, 2.0, 3.0 };\n        a /= 2;\n        pytensor<double, 1> ref = { 0.5, 1.0, 1.5 };\n        EXPECT_EQ(ref(0), a(0));\n        EXPECT_EQ(ref(1), a(1));\n        EXPECT_EQ(ref(2), a(2));\n    }\n}\n"
  },
  {
    "path": "test/test_pyvectorize.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include \"gtest/gtest.h\"\n#include \"test_common.hpp\"\n#include \"xtensor-python/pytensor.hpp\"\n#include \"xtensor-python/pyvectorize.hpp\"\n#include \"pybind11/pybind11.h\"\n#include \"pybind11/numpy.h\"\n\nnamespace xt\n{\n\n    double f1(double a, double b)\n    {\n        return a + b;\n    }\n\n    using shape_type = std::vector<std::size_t>;\n\n    TEST(pyvectorize, function)\n    {\n        auto vecf1 = pyvectorize(f1);\n        shape_type shape = { 3, 2 };\n        pyarray<double> a(shape, 1.5);\n        pyarray<double> b(shape, 2.3);\n        pyarray<double> c = vecf1(a, b);\n        EXPECT_EQ(a(0, 0) + b(0, 0), c(0, 0));\n    }\n\n    TEST(pyvectorize, lambda)\n    {\n        auto vecf1 = pyvectorize([](double a, double b) { return a + b; });\n        shape_type shape = { 3, 2 };\n        pyarray<double> a(shape, 1.5);\n        pyarray<double> b(shape, 2.3);\n        pyarray<double> c = vecf1(a, b);\n        EXPECT_EQ(a(0, 0) + b(0, 0), c(0, 0));\n    }\n\n    TEST(pyvectorize, complex)\n    {\n        using complex_t = std::complex<double>;\n        shape_type shape = { 3, 2 };\n        pyarray<complex_t> a(shape, complex_t(1.2, 2.5));\n        auto f = pyvectorize([](complex_t x) { return std::abs(x); });\n        auto res = f(a);\n        double exp = std::abs(a(1, 1));\n        EXPECT_EQ(exp, res(1, 1));\n    }\n}\n"
  },
  {
    "path": "test/test_sfinae.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include <limits>\n\n#include \"gtest/gtest.h\"\n#include \"xtensor-python/pytensor.hpp\"\n#include \"xtensor-python/pyarray.hpp\"\n#include \"xtensor/containers/xarray.hpp\"\n#include \"xtensor/containers/xtensor.hpp\"\n\nnamespace xt\n{\n    template <class E, std::enable_if_t<!xt::has_fixed_rank_t<E>::value, int> = 0>\n    inline bool sfinae_has_fixed_rank(E&&)\n    {\n        return false;\n    }\n\n    template <class E, std::enable_if_t<xt::has_fixed_rank_t<E>::value, int> = 0>\n    inline bool sfinae_has_fixed_rank(E&&)\n    {\n        return true;\n    }\n\n    TEST(sfinae, fixed_rank)\n    {\n        xt::pyarray<size_t> a = {{9, 9, 9}, {9, 9, 9}};\n        xt::pytensor<size_t, 1> b = {9, 9};\n        xt::pytensor<size_t, 2> c = {{9, 9}, {9, 9}};\n\n        EXPECT_TRUE(sfinae_has_fixed_rank(a) == false);\n        EXPECT_TRUE(sfinae_has_fixed_rank(b) == true);\n        EXPECT_TRUE(sfinae_has_fixed_rank(c) == true);\n    }\n\n    TEST(sfinae, get_rank)\n    {\n        xt::pytensor<double, 1> A = xt::zeros<double>({2});\n        xt::pytensor<double, 2> B = xt::zeros<double>({2, 2});\n        xt::pyarray<double> C = xt::zeros<double>({2, 2});\n\n        EXPECT_TRUE(xt::get_rank<decltype(A)>::value == 1ul);\n        EXPECT_TRUE(xt::get_rank<decltype(B)>::value == 2ul);\n        EXPECT_TRUE(xt::get_rank<decltype(C)>::value == 
SIZE_MAX);\n    }\n}\n"
  },
  {
    "path": "test_python/main.cpp",
    "content": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          *\n* Copyright (c) QuantStack                                                 *\n*                                                                          *\n* Distributed under the terms of the BSD 3-Clause License.                 *\n*                                                                          *\n* The full license is in the file LICENSE, distributed with this software. *\n****************************************************************************/\n\n#include <numeric>\n\n#include \"xtensor/core/xmath.hpp\"\n#include \"xtensor/containers/xarray.hpp\"\n#include \"xtensor/containers/xfixed.hpp\"\n#define FORCE_IMPORT_ARRAY\n#include \"xtensor-python/pyarray.hpp\"\n#include \"xtensor-python/pytensor.hpp\"\n#include \"xtensor-python/pyvectorize.hpp\"\n#include \"xtensor/containers/xadapt.hpp\"\n#include \"xtensor/views/xstrided_view.hpp\"\n\nnamespace py = pybind11;\nusing complex_t = std::complex<double>;\n\n// Examples\n\ndouble example1(xt::pyarray<double>& m)\n{\n    return m(0);\n}\n\nxt::pyarray<double> example2(xt::pyarray<double>& m)\n{\n    return m + 2;\n}\n\nxt::xarray<int> example3_xarray(const xt::xarray<int>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xarray<int, xt::layout_type::column_major> example3_xarray_colmajor(\n    const xt::xarray<int, xt::layout_type::column_major>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor<int, 3> example3_xtensor3(const xt::xtensor<int, 3>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor<int, 2> example3_xtensor2(const xt::xtensor<int, 2>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor<int, 2, xt::layout_type::column_major> example3_xtensor2_colmajor(\n    const xt::xtensor<int, 2, xt::layout_type::column_major>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor_fixed<int, xt::xshape<4, 3, 
2>> example3_xfixed3(const xt::xtensor_fixed<int, xt::xshape<2, 3, 4>>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor_fixed<int, xt::xshape<3, 2>> example3_xfixed2(const xt::xtensor_fixed<int, xt::xshape<2, 3>>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\nxt::xtensor_fixed<int, xt::xshape<3, 2>, xt::layout_type::column_major> example3_xfixed2_colmajor(\n    const xt::xtensor_fixed<int, xt::xshape<2, 3>, xt::layout_type::column_major>& m)\n{\n    return xt::transpose(m) + 2;\n}\n\n// Readme Examples\n\ndouble readme_example1(xt::pyarray<double>& m)\n{\n    auto sines = xt::sin(m);\n    return std::accumulate(sines.cbegin(), sines.cend(), 0.0);\n}\n\ndouble readme_example2(double i, double j)\n{\n    return std::sin(i) -  std::cos(j);\n}\n\nauto complex_overload(const xt::pyarray<std::complex<double>>& a)\n{\n    return a;\n}\nauto no_complex_overload(const xt::pyarray<double>& a)\n{\n    return a;\n}\n\nauto complex_overload_reg(const std::complex<double>& a)\n{\n    return a;\n}\n\nauto no_complex_overload_reg(const double& a)\n{\n    return a;\n}\n//\n// Operator examples\n//\nxt::pyarray<double> array_addition(const xt::pyarray<double>& m, const xt::pyarray<double>& n)\n{\n    return m + n;\n}\n\nxt::pyarray<double> array_subtraction(xt::pyarray<double>& m, xt::pyarray<double>& n)\n{\n    return m - n;\n}\n\nxt::pyarray<double> array_multiplication(xt::pyarray<double>& m, xt::pyarray<double>& n)\n{\n    return m * n;\n}\n\nxt::pyarray<double> array_division(xt::pyarray<double>& m, xt::pyarray<double>& n)\n{\n    return m / n;\n}\n\n// Vectorize Examples\n\nint add(int i, int j)\n{\n    return i + j;\n}\n\ntemplate <class T> std::string typestring() { return \"Unknown\"; }\ntemplate <> std::string typestring<uint8_t>() { return \"uint8\"; }\ntemplate <> std::string typestring<int8_t>() { return \"int8\"; }\ntemplate <> std::string typestring<uint16_t>() { return \"uint16\"; }\ntemplate <> std::string typestring<int16_t>() { return \"int16\"; 
}\ntemplate <> std::string typestring<uint32_t>() { return \"uint32\"; }\ntemplate <> std::string typestring<int32_t>() { return \"int32\"; }\ntemplate <> std::string typestring<uint64_t>() { return \"uint64\"; }\ntemplate <> std::string typestring<int64_t>() { return \"int64\"; }\n\ntemplate <class T>\ninline std::string int_overload(xt::pyarray<T>& m)\n{\n    return typestring<T>();\n}\n\nvoid dump_numpy_constant()\n{\n    std::cout << \"NPY_BOOL = \" << NPY_BOOL << std::endl;\n    std::cout << \"NPY_BYTE = \" << NPY_BYTE << std::endl;\n    std::cout << \"NPY_UBYTE = \" << NPY_UBYTE << std::endl;\n    std::cout << \"NPY_INT8 = \" << NPY_INT8 << std::endl;\n    std::cout << \"NPY_UINT8 = \" << NPY_UINT8 << std::endl;\n    std::cout << \"NPY_SHORT = \" << NPY_SHORT << std::endl;\n    std::cout << \"NPY_USHORT = \" << NPY_USHORT << std::endl;\n    std::cout << \"NPY_INT16 = \" << NPY_INT16 << std::endl;\n    std::cout << \"NPY_UINT16 = \" << NPY_UINT16 << std::endl;\n    std::cout << \"NPY_INT = \" << NPY_INT << std::endl;\n    std::cout << \"NPY_UINT = \" << NPY_UINT << std::endl;\n    std::cout << \"NPY_INT32 = \" << NPY_INT32 << std::endl;\n    std::cout << \"NPY_UINT32 = \" << NPY_UINT32 << std::endl;\n    std::cout << \"NPY_LONG = \" << NPY_LONG << std::endl;\n    std::cout << \"NPY_ULONG = \" << NPY_ULONG << std::endl;\n    std::cout << \"NPY_LONGLONG = \" << NPY_LONGLONG << std::endl;\n    std::cout << \"NPY_ULONGLONG = \" << NPY_ULONGLONG << std::endl;\n    std::cout << \"NPY_INT64 = \" << NPY_INT64 << std::endl;\n    std::cout << \"NPY_UINT64 = \" << NPY_UINT64 << std::endl;\n}\n\nstruct A\n{\n    double a;\n    int b;\n    char c;\n    std::array<double, 3> x;\n};\n\nstruct B\n{\n    double a;\n    int b;\n};\n\nclass C\n{\npublic:\n    using array_type = xt::xarray<double, xt::layout_type::row_major>;\n    C() : m_array{0, 0, 0, 0} {}\n    array_type & array() { return m_array; }\nprivate:\n    array_type m_array;\n};\n\nstruct test_native_casters\n{\n    
using array_type = xt::xarray<double>;\n    array_type a = xt::ones<double>({50, 50});\n\n    const auto & get_array()\n    {\n        return a;\n    }\n\n    auto get_strided_view()\n    {\n        return xt::strided_view(a, {xt::range(0, 1), xt::range(0, 3, 2)});\n    }\n\n    auto get_array_adapter()\n    {\n        using shape_type = std::vector<size_t>;\n        shape_type shape = {2, 2};\n        shape_type stride = {3, 2};\n        return xt::adapt(a.data(), 4, xt::no_ownership(), shape, stride);\n    }\n\n    auto get_tensor_adapter()\n    {\n        using shape_type = std::array<size_t, 2>;\n        shape_type shape = {2, 2};\n        shape_type stride = {3, 2};\n        return xt::adapt(a.data(), 4, xt::no_ownership(), shape, stride);\n    }\n\n    auto get_owning_array_adapter()\n    {\n        size_t size = 100;\n        int * data = new int[size];\n        std::fill(data, data + size, 1);\n\n        using shape_type = std::vector<size_t>;\n        shape_type shape = {size};\n        return xt::adapt(std::move(data), size, xt::acquire_ownership(), shape);\n    }\n};\n\nxt::pyarray<A> dtype_to_python()\n{\n    A a1{123, 321, 'a', {1, 2, 3}};\n    A a2{111, 222, 'x', {5, 5, 5}};\n\n    return xt::pyarray<A>({a1, a2});\n}\n\nxt::pyarray<B> dtype_from_python(xt::pyarray<B>& b)\n{\n    if (b(0).a != 1 || b(0).b != 'p' || b(1).a != 123 || b(1).b != 'c')\n    {\n        throw std::runtime_error(\"FAIL\");\n    }\n\n    b(0).a = 123.;\n    b(0).b = 'w';\n    return b;\n}\n\nvoid char_array(xt::pyarray<char[20]>& carr)\n{\n    if (strcmp(carr(2), \"python\"))\n    {\n        throw std::runtime_error(\"TEST FAILED!\");\n    }\n    std::fill(&carr(2)[0], &carr(2)[0] + 20, 0);\n    carr(2)[0] = 'c';\n    carr(2)[1] = '+';\n    carr(2)[2] = '+';\n    carr(2)[3] = '\\0';\n}\n\nvoid row_major_tensor(xt::pytensor<double, 3, xt::layout_type::row_major>& arg)\n{\n    if (!std::is_same<decltype(arg.begin()), double*>::value)\n    {\n        throw std::runtime_error(\"TEST 
FAILED\");\n    }\n}\n\nvoid col_major_array(xt::pyarray<double, xt::layout_type::column_major>& arg)\n{\n    if (!std::is_same<decltype(arg.template begin<xt::layout_type::column_major>()), double*>::value)\n    {\n        throw std::runtime_error(\"TEST FAILED\");\n    }\n}\n\nxt::pytensor<int, 0> xscalar(const xt::pytensor<int, 1>& arg)\n{\n    return xt::sum(arg);\n}\n\ntemplate <class T>\nusing ndarray = xt::pyarray<T, xt::layout_type::row_major>;\n\nvoid test_rm(ndarray<int>const& x)\n{\n    ndarray<int> y = x;\n    ndarray<int> z = xt::zeros<int>({10});\n}\n\nPYBIND11_MODULE(xtensor_python_test, m)\n{\n    xt::import_numpy();\n\n    m.doc() = \"Test module for xtensor python bindings\";\n\n    m.def(\"example1\", example1);\n    m.def(\"example2\", example2);\n    m.def(\"example3_xarray\", example3_xarray);\n    m.def(\"example3_xarray_colmajor\", example3_xarray_colmajor);\n    m.def(\"example3_xtensor3\", example3_xtensor3);\n    m.def(\"example3_xtensor2\", example3_xtensor2);\n    m.def(\"example3_xtensor2_colmajor\", example3_xtensor2_colmajor);\n    m.def(\"example3_xfixed3\", example3_xfixed3);\n    m.def(\"example3_xfixed2\", example3_xfixed2);\n    m.def(\"example3_xfixed2_colmajor\", example3_xfixed2_colmajor);\n\n    m.def(\"complex_overload\", no_complex_overload);\n    m.def(\"complex_overload\", complex_overload);\n    m.def(\"complex_overload_reg\", no_complex_overload_reg);\n    m.def(\"complex_overload_reg\", complex_overload_reg);\n\n    m.def(\"readme_example1\", readme_example1);\n    m.def(\"readme_example2\", xt::pyvectorize(readme_example2));\n\n    m.def(\"array_addition\", array_addition);\n    m.def(\"array_subtraction\", array_subtraction);\n    m.def(\"array_multiplication\", array_multiplication);\n    m.def(\"array_division\", array_division);\n\n    m.def(\"vectorize_example1\", xt::pyvectorize(add));\n\n    m.def(\"rect_to_polar\", xt::pyvectorize([](complex_t x) { return std::abs(x); }));\n\n    m.def(\"compare_shapes\", 
[](const xt::pyarray<double>& a, const xt::pyarray<double>& b) {\n        return a.shape() == b.shape();\n    });\n\n    m.def(\"test_rm\", test_rm);\n\n    m.def(\"int_overload\", int_overload<uint8_t>);\n    m.def(\"int_overload\", int_overload<int8_t>);\n    m.def(\"int_overload\", int_overload<uint16_t>);\n    m.def(\"int_overload\", int_overload<int16_t>);\n    m.def(\"int_overload\", int_overload<uint32_t>);\n    m.def(\"int_overload\", int_overload<int32_t>);\n    m.def(\"int_overload\", int_overload<uint64_t>);\n    m.def(\"int_overload\", int_overload<int64_t>);\n\n    m.def(\"dump_numpy_constant\", dump_numpy_constant);\n\n    // Register additional dtypes\n    PYBIND11_NUMPY_DTYPE(A, a, b, c, x);\n    PYBIND11_NUMPY_DTYPE(B, a, b);\n\n    m.def(\"dtype_to_python\", dtype_to_python);\n    m.def(\"dtype_from_python\", dtype_from_python);\n    m.def(\"char_array\", char_array);\n\n    m.def(\"col_major_array\", col_major_array);\n    m.def(\"row_major_tensor\", row_major_tensor);\n\n    m.def(\"xscalar\", xscalar);\n\n    py::class_<C>(m, \"C\")\n        .def(py::init<>())\n        .def_property_readonly(\n            \"copy\",\n            [](C & self) { return self.array(); }\n        )\n        .def_property_readonly(\n            \"ref\",\n            [](C & self) -> C::array_type & { return self.array(); }\n        )\n    ;\n\n    m.def(\"simple_array\", [](xt::pyarray<int>) { return 1; } );\n    m.def(\"simple_tensor\", [](xt::pytensor<int, 1>) { return 2; } );\n\n    m.def(\"diff_shape_overload\", [](xt::pytensor<int, 1> a) { return 1; });\n    m.def(\"diff_shape_overload\", [](xt::pytensor<int, 2> a) { return 2; });\n\n    py::class_<test_native_casters>(m, \"test_native_casters\")\n            .def(py::init<>())\n            .def(\"get_array\", &test_native_casters::get_array, py::return_value_policy::reference_internal) // memory managed by the class instance\n            .def(\"get_strided_view\", &test_native_casters::get_strided_view, 
py::keep_alive<0, 1>())        // keep_alive<0, 1>() => do not free \"self\" before the returned view\n            .def(\"get_array_adapter\", &test_native_casters::get_array_adapter, py::keep_alive<0, 1>())      // keep_alive<0, 1>() => do not free \"self\" before the returned adapter\n            .def(\"get_tensor_adapter\", &test_native_casters::get_tensor_adapter, py::keep_alive<0, 1>())    // keep_alive<0, 1>() => do not free \"self\" before the returned adapter\n            .def(\"get_owning_array_adapter\", &test_native_casters::get_owning_array_adapter)                // auto memory management as the adapter owns its memory\n            .def(\"view_keep_alive_member_function\", [](test_native_casters & self, xt::pyarray<double> & a) // keep_alive<0, 2>() => do not free second parameter before the returned view\n                    {return xt::reshape_view(a, {a.size(), });},\n                    py::keep_alive<0, 2>());\n}\n"
  },
  {
    "path": "test_python/setup.py",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport sys\nimport os\nimport setuptools\n\n__version__ = '0.0.1'\n\n\nclass get_pybind_include(object):\n    \"\"\"Helper class to determine the pybind11 include path\n\n    The purpose of this class is to postpone importing pybind11\n    until it is actually installed, so that the ``get_include()``\n    method can be invoked. \"\"\"\n\n    def __init__(self, user=False):\n        self.user = user\n\n    def __str__(self):\n        import pybind11\n        return pybind11.get_include(self.user)\n\nclass get_numpy_include(object):\n    \"\"\"Helper class to determine the numpy include path\n\n    The purpose of this class is to postpone importing numpy\n    until it is actually installed, so that the ``get_include()``\n    method can be invoked. 
\"\"\"\n\n    def __str__(self):\n        import numpy\n        return numpy.get_include()\n\next_modules = [\n    Extension(\n        'xtensor_python_test',\n        ['main.cpp'],\n        include_dirs=[\n            # Path to pybind11 headers\n            '../include/',\n            get_pybind_include(),\n            get_pybind_include(user=True),\n            # Path to numpy headers\n            get_numpy_include(),\n            os.path.join(sys.prefix, 'include'),\n            os.path.join(sys.prefix, 'Library', 'include')\n        ],\n        language='c++'\n    ),\n]\n\n\ndef has_flag(compiler, flagname):\n    \"\"\"Return a boolean indicating whether a flag name is supported on\n    the specified compiler.\n    \"\"\"\n    import tempfile\n    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\n        f.write('int main (int argc, char **argv) { return 0; }')\n        try:\n            compiler.compile([f.name], extra_postargs=[flagname])\n        except setuptools.distutils.errors.CompileError:\n            return False\n    return True\n\n\ndef cpp_flag(compiler):\n    \"\"\"Return the -std=c++20 compiler flag and raise an error when the flag is\n    not available.\n    \"\"\"\n    if has_flag(compiler, '-std=c++20'):\n        return '-std=c++20'\n    else:\n        raise RuntimeError('C++20 support is required by xtensor!')\n\n\nclass BuildExt(build_ext):\n    \"\"\"A custom build extension for adding compiler-specific options.\"\"\"\n    c_opts = {\n        'msvc': ['/EHsc'],\n        'unix': [],\n    }\n\n    if sys.platform == 'darwin':\n        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.13']\n\n    def build_extensions(self):\n        ct = self.compiler.compiler_type\n        opts = self.c_opts.get(ct, [])\n        if ct == 'unix':\n            opts.append('-DVERSION_INFO=\"%s\"' % self.distribution.get_version())\n            opts.append(cpp_flag(self.compiler))\n            if has_flag(self.compiler, '-fvisibility=hidden'):\n      
          opts.append('-fvisibility=hidden')\n        elif ct == 'msvc':\n            opts.append('/DVERSION_INFO=\\\\\"%s\\\\\"' % self.distribution.get_version())\n            opts.append('/std:c++20')\n        for ext in self.extensions:\n            ext.extra_compile_args = opts\n        build_ext.build_extensions(self)\n\nsetup(\n    name='xtensor_python_test',\n    version=__version__,\n    author='Sylvain Corlay',\n    author_email='sylvain.corlay@gmail.com',\n    url='https://github.com/pybind/python_example',\n    description='An example project using xtensor-python',\n    long_description='',\n    ext_modules=ext_modules,\n    install_requires=['pybind11>=2.0.1'],\n    cmdclass={'build_ext': BuildExt},\n    zip_safe=False,\n)\n"
  },
  {
    "path": "test_python/test_pyarray.py",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\nimport os\nimport sys\nimport subprocess\n\n# Build the test extension\n\nhere = os.path.abspath(os.path.dirname(__file__))\nsubprocess.check_call([sys.executable, os.path.join(here, 'setup.py'), 'build_ext', '--inplace'], cwd=here)\n\n# Test it!\n\nfrom unittest import TestCase\nimport xtensor_python_test as xt\nimport numpy as np\n\nclass XtensorTest(TestCase):\n    def test_rm(self):\n        xt.test_rm(np.array([10], dtype=int))\n\n    def test_example1(self):\n        self.assertEqual(4, xt.example1([4, 5, 6]))\n\n    def test_example2(self):\n        x = np.array([[0., 1.], [2., 3.]])\n        res = np.array([[2., 3.], [4., 5.]])\n        y = xt.example2(x)\n        np.testing.assert_allclose(y, res, 1e-12)\n\n    def test_example3(self):\n        x = np.arange(2 * 3).reshape(2, 3)\n        xc = np.asfortranarray(x)\n        y = np.arange(2 * 3 * 4).reshape(2, 3, 4)\n        v = y[1:, 1:, 0]\n        z = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)\n        np.testing.assert_array_equal(xt.example3_xarray(x), x.T + 2)\n        np.testing.assert_array_equal(xt.example3_xarray_colmajor(xc), xc.T + 2)\n        np.testing.assert_array_equal(xt.example3_xtensor3(y), y.T + 2)\n        np.testing.assert_array_equal(xt.example3_xtensor2(x), x.T + 2)\n        np.testing.assert_array_equal(xt.example3_xtensor2(y[1:, 1:, 0]), v.T + 2)\n     
   np.testing.assert_array_equal(xt.example3_xtensor2_colmajor(xc), xc.T + 2)\n\n        np.testing.assert_array_equal(xt.example3_xfixed3(y), y.T + 2)\n        np.testing.assert_array_equal(xt.example3_xfixed2(x), x.T + 2)\n        np.testing.assert_array_equal(xt.example3_xfixed2_colmajor(xc), xc.T + 2)\n\n        with self.assertRaises(TypeError):\n            xt.example3_xtensor3(x)\n\n        with self.assertRaises(TypeError):\n            xt.example3_xfixed3(x)\n\n        with self.assertRaises(TypeError):\n            x = np.arange(3*2).reshape(3, 2)\n            xt.example3_xfixed2(x)\n    def test_broadcast_addition(self):\n        x = np.array([[2., 3., 4., 5.]])\n        y = np.array([[1., 2., 3., 4.],\n                      [1., 2., 3., 4.],\n                      [1., 2., 3., 4.]])\n        res = np.array([[3., 5., 7., 9.],\n                        [3., 5., 7., 9.],\n                        [3., 5., 7., 9.]])\n        z = xt.array_addition(x, y)\n        np.testing.assert_allclose(z, res, 1e-12)\n    def test_broadcast_subtraction(self):\n        x = np.array([[4., 5., 6., 7.]])\n        y = np.array([[4., 3., 2., 1.],\n                      [4., 3., 2., 1.],\n                      [4., 3., 2., 1.]])\n        res = np.array([[0., 2., 4., 6.],\n                        [0., 2., 4., 6.],\n                        [0., 2., 4., 6.]])\n        z = xt.array_subtraction(x, y)\n        np.testing.assert_allclose(z, res, 1e-12)\n\n    def test_broadcast_multiplication(self):\n        x = np.array([[1., 2., 3., 4.]])\n        y = np.array([[3., 2., 3., 2.],\n                      [3., 2., 3., 2.],\n                      [3., 2., 3., 2.]])\n        res = np.array([[3., 4., 9., 8.],\n                        [3., 4., 9., 8.],\n                        [3., 4., 9., 8.]])\n        z = xt.array_multiplication(x, y)\n        np.testing.assert_allclose(z, res, 1e-12)\n\n    def test_broadcast_division(self):\n        x = np.array([[8., 6., 4., 2.]])\n        y = 
np.array([[2., 2., 2., 2.],\n                      [2., 2., 2., 2.],\n                      [2., 2., 2., 2.]])\n        res = np.array([[4., 3., 2., 1.],\n                        [4., 3., 2., 1.],\n                        [4., 3., 2., 1.]])\n        z = xt.array_division(x, y)\n        np.testing.assert_allclose(z, res, 1e-12)\n\n    def test_vectorize(self):\n        x1 = np.array([[0, 1], [2, 3]])\n        x2 = np.array([0, 1])\n        res = np.array([[0, 2], [2, 4]])\n        y = xt.vectorize_example1(x1, x2)\n        np.testing.assert_array_equal(y, res)\n\n    def test_readme_example1(self):\n        v = np.arange(15).reshape(3, 5)\n        y = xt.readme_example1(v)\n        np.testing.assert_allclose(y, 1.2853996391883833, 1e-12)\n\n    def test_complex_overload_reg(self):\n        a = 23.23\n        c = 2.0 + 3.1j\n        self.assertEqual(xt.complex_overload_reg(a), a)\n        self.assertEqual(xt.complex_overload_reg(c), c)\n\n    def test_complex_overload(self):\n        a = np.random.rand(3, 3)\n        b = np.random.rand(3, 3)\n        c = a + b * 1j\n        y = xt.complex_overload(c)\n        np.testing.assert_allclose(np.imag(y), np.imag(c))\n        np.testing.assert_allclose(np.real(y), np.real(c))\n        x = xt.complex_overload(b)\n        self.assertEqual(x.dtype, b.dtype)\n        np.testing.assert_allclose(x, b)\n\n    def test_readme_example2(self):\n        x = np.arange(15).reshape(3, 5)\n        y = [1, 2, 3, 4, 5]\n        z = xt.readme_example2(x, y)\n        np.testing.assert_allclose(z,\n            [[-0.540302,  1.257618,  1.89929 ,  0.794764, -1.040465],\n             [-1.499227,  0.136731,  1.646979,  1.643002,  0.128456],\n             [-1.084323, -0.583843,  0.45342 ,  1.073811,  0.706945]], 1e-5)\n\n    def test_rect_to_polar(self):\n        x = np.ones(10, dtype=complex)\n        z = xt.rect_to_polar(x[::2]);\n        np.testing.assert_allclose(z, np.ones(5, dtype=float), 1e-5)\n\n    def test_shape_comparison(self):\n        
x = np.ones([4, 4])\n        y = np.ones([5, 5])\n        z = np.zeros([4, 4])\n        self.assertFalse(xt.compare_shapes(x, y))\n        self.assertTrue(xt.compare_shapes(x, z))\n\n    def test_int_overload(self):\n        for dtype in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64]:\n            b = xt.int_overload(np.ones((10), dtype))\n            self.assertEqual(str(dtype.__name__), b)\n\n    def test_dtype(self):\n        var = xt.dtype_to_python()\n        self.assertEqual(var.dtype.names, ('a', 'b', 'c', 'x'))\n\n        exp_dtype = {\n             'a': (np.dtype('float64'), 0),\n             'b': (np.dtype('int32'), 8),\n             'c': (np.dtype('int8'), 12),\n             'x': (np.dtype(('<f8', (3,))), 16)\n        }\n\n        self.assertEqual(var.dtype.fields, exp_dtype)\n\n        self.assertEqual(var[0]['a'], 123)\n        self.assertEqual(var[0]['b'], 321)\n        self.assertEqual(var[0]['c'], ord('a'))\n        self.assertTrue(np.all(var[0]['x'] == [1, 2, 3]))\n\n        self.assertEqual(var[1]['a'], 111)\n        self.assertEqual(var[1]['b'], 222)\n        self.assertEqual(var[1]['c'], ord('x'))\n        self.assertTrue(np.all(var[1]['x'] == [5, 5, 5]))\n\n        d_dtype = np.dtype({'names':['a','b'], 'formats':['<f8','<i4'], 'offsets':[0,8], 'itemsize':16})\n\n        darr = np.array([(1, ord('p')), (123, ord('c'))], dtype=d_dtype)\n        self.assertEqual(darr[0]['a'], 1)\n        res = xt.dtype_from_python(darr)\n        self.assertEqual(res[0]['a'], 123.)\n        self.assertEqual(darr[0]['a'], 123.)\n\n    def test_char_array(self):\n        var = np.array(['hello', 'from', 'python'], dtype=np.dtype('|S20'));\n        self.assertEqual(var[0], b'hello')\n        xt.char_array(var)\n        self.assertEqual(var[0], b'hello')\n        self.assertEqual(var[1], b'from')\n        self.assertEqual(var[2], b'c++')\n\n    def test_col_row_major(self):\n        var = np.arange(50, dtype=float).reshape(2, 5, 
5)\n\n        with self.assertRaises(RuntimeError):\n            xt.col_major_array(var)\n\n        with self.assertRaises(TypeError):\n            xt.row_major_tensor(var.T)\n\n        with self.assertRaises(TypeError):\n            xt.row_major_tensor(var[:, ::2, ::2])\n\n        with self.assertRaises(TypeError):\n            # raise for wrong dimension\n            xt.row_major_tensor(var[0, 0, :])\n\n        xt.row_major_tensor(var)\n        varF = np.arange(50, dtype=float).reshape(2, 5, 5, order='F')\n        xt.col_major_array(varF)\n        xt.col_major_array(varF[:, :, 0]) # still col major!\n\n    def test_xscalar(self):\n        var = np.arange(50, dtype=int)\n        self.assertTrue(np.sum(var) == xt.xscalar(var))\n\n    def test_bad_argument_call(self):\n        with self.assertRaises(TypeError):\n            xt.simple_array(\"foo\")\n\n        with self.assertRaises(TypeError):\n            xt.simple_tensor(\"foo\")\n\n    def test_diff_shape_overload(self):\n        self.assertEqual(1, xt.diff_shape_overload(np.ones(2)))\n        self.assertEqual(2, xt.diff_shape_overload(np.ones((2, 2))))\n\n        with self.assertRaises(TypeError):\n            # FIXME: the TypeError information is not informative\n            xt.diff_shape_overload(np.ones((2, 2, 2)))\n\n    def test_native_casters(self):\n        import gc\n\n        # check keep alive policy for get_strided_view()\n        gc.collect()\n        obj = xt.test_native_casters()\n        a = obj.get_strided_view()\n        obj = None\n        gc.collect()\n        _ = np.zeros((100, 100))\n        self.assertEqual(a.sum(), a.size)\n\n        # check keep alive policy for get_array_adapter()\n        gc.collect()\n        obj = xt.test_native_casters()\n        a = obj.get_array_adapter()\n        obj = None\n        gc.collect()\n        _ = np.zeros((100, 100))\n        self.assertEqual(a.sum(), a.size)\n\n        # check keep alive policy for get_array_adapter()\n        gc.collect()\n        
obj = xt.test_native_casters()\n        a = obj.get_tensor_adapter()\n        obj = None\n        gc.collect()\n        _ = np.zeros((100, 100))\n        self.assertEqual(a.sum(), a.size)\n\n        # check keep alive policy for get_owning_array_adapter()\n        gc.collect()\n        obj = xt.test_native_casters()\n        a = obj.get_owning_array_adapter()\n        gc.collect()\n        _ = np.zeros((100, 100))\n        self.assertEqual(a.sum(), a.size)\n\n        # check keep alive policy for view_keep_alive_member_function()\n        gc.collect()\n        a = np.ones((100, 100))\n        b = obj.view_keep_alive_member_function(a)\n        obj = None\n        a = None\n        gc.collect()\n        _ = np.zeros((100, 100))\n        self.assertEqual(b.sum(), b.size)\n\n        # check shared buffer (insure that no copy is done)\n        obj = xt.test_native_casters()\n        arr = obj.get_array()\n\n        strided_view = obj.get_strided_view()\n        strided_view[0, 1] = -1\n        self.assertEqual(strided_view.shape, (1, 2))\n        self.assertEqual(arr[0, 2], -1)\n\n        adapter = obj.get_array_adapter()\n        self.assertEqual(adapter.shape, (2, 2))\n        adapter[1, 1] = -2\n        self.assertEqual(arr[0, 5], -2)\n\n        adapter = obj.get_tensor_adapter()\n        self.assertEqual(adapter.shape, (2, 2))\n        adapter[1, 1] = -3\n        self.assertEqual(arr[0, 5], -3)\n\nclass AttributeTest(TestCase):\n\n    def setUp(self):\n        self.c = xt.C()\n\n    def test_copy(self):\n        arr = self.c.copy\n        arr[0] = 1\n        self.assertEqual([0.]*4, self.c.copy.tolist())\n\n    def test_reference(self):\n        arr = self.c.ref\n        arr[0] = 1\n        self.assertEqual([1.] + [0.]*3, self.c.ref.tolist())\n"
  },
  {
    "path": "xtensor-python.pc.in",
    "content": "prefix=@CMAKE_INSTALL_PREFIX@\nincludedir=${prefix}/include\n\nName: xtensor-python\nDescription: An extension to the xtensor library, offering Python bindings with enhanced NumPy support.\nVersion: @xtensor-python_VERSION@\nCflags: -I${includedir}\n"
  },
  {
    "path": "xtensor-pythonConfig.cmake.in",
    "content": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #\n# Copyright (c) QuantStack                                                 #\n#                                                                          #\n# Distributed under the terms of the BSD 3-Clause License.                 #\n#                                                                          #\n# The full license is in the file LICENSE, distributed with this software. #\n############################################################################\n\n# xtensor-python cmake module\n# This module sets the following variables in your project::\n#\n#   xtensor-python_FOUND - true if xtensor-python found on the system\n#   xtensor-python_INCLUDE_DIRS - the directory containing xtensor-python headers\n#   xtensor-python_LIBRARY - empty\n\n@PACKAGE_INIT@\n\nif(NOT TARGET @PROJECT_NAME@)\n  include(\"${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake\")\n  get_target_property(@PROJECT_NAME@_INCLUDE_DIRS xtensor-python INTERFACE_INCLUDE_DIRECTORIES)\nendif()\n"
  }
]