Showing preview only (278K chars total). Download the full file or copy to clipboard to get everything.
Repository: xtensor-stack/xtensor-python
Branch: master
Commit: 3cc25f4c7531
Files: 73
Total size: 258.4 KB
Directory structure:
gitextract_lx7y446y/
├── .github/
│ └── workflows/
│ ├── linux.yml
│ ├── osx.yml
│ └── windows.yml
├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README.md
├── benchmark/
│ ├── CMakeLists.txt
│ ├── benchmark_pyarray.py
│ ├── benchmark_pybind_array.py
│ ├── benchmark_pybind_vectorize.py
│ ├── benchmark_pytensor.py
│ ├── benchmark_pyvectorize.py
│ ├── main.cpp
│ └── setup.py
├── cmake/
│ └── FindNumPy.cmake
├── docs/
│ ├── Doxyfile
│ ├── Makefile
│ ├── environment.yml
│ ├── make.bat
│ └── source/
│ ├── _static/
│ │ └── main_stylesheet.css
│ ├── api_reference.rst
│ ├── array_tensor.rst
│ ├── basic_usage.rst
│ ├── compilers.rst
│ ├── conf.py
│ ├── cookiecutter.rst
│ ├── dev_build_options.rst
│ ├── examples/
│ │ ├── copy_cast/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── example.py
│ │ │ └── main.cpp
│ │ ├── readme_example_1/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── example.py
│ │ │ └── main.cpp
│ │ └── sfinae/
│ │ ├── CMakeLists.txt
│ │ ├── example.py
│ │ ├── main.cpp
│ │ ├── mymodule.hpp
│ │ └── python.cpp
│ ├── examples.rst
│ ├── index.rst
│ ├── installation.rst
│ ├── numpy_capi.rst
│ ├── pyarray.rst
│ ├── pytensor.rst
│ ├── pyvectorize.rst
│ └── releasing.rst
├── environment-dev.yml
├── include/
│ └── xtensor-python/
│ ├── pyarray.hpp
│ ├── pyarray_backstrides.hpp
│ ├── pycontainer.hpp
│ ├── pynative_casters.hpp
│ ├── pystrides_adaptor.hpp
│ ├── pytensor.hpp
│ ├── pyvectorize.hpp
│ ├── xtensor_python_config.hpp
│ └── xtensor_type_caster_base.hpp
├── readthedocs.yml
├── test/
│ ├── CMakeLists.txt
│ ├── copyGTest.cmake.in
│ ├── downloadGTest.cmake.in
│ ├── main.cpp
│ ├── test_common.hpp
│ ├── test_pyarray.cpp
│ ├── test_pyarray_traits.cpp
│ ├── test_pytensor.cpp
│ ├── test_pyvectorize.cpp
│ └── test_sfinae.cpp
├── test_python/
│ ├── main.cpp
│ ├── setup.py
│ └── test_pyarray.py
├── xtensor-python.pc.in
└── xtensor-pythonConfig.cmake.in
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/linux.yml
================================================
name: Linux

on:
  workflow_dispatch:
  pull_request:
  push:
    branches: [master]

concurrency:
  # One live run per workflow/job/ref; newer pushes cancel in-flight runs.
  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    # Login shell (-l) so the micromamba environment is activated in every step.
    shell: bash -e -l {0}

jobs:
  build:
    runs-on: ubuntu-24.04
    name: ${{ matrix.sys.compiler }} ${{ matrix.sys.version }}
    strategy:
      fail-fast: false
      matrix:
        sys:
          - {compiler: gcc, version: '11'}
          - {compiler: gcc, version: '12'}
          - {compiler: gcc, version: '13'}
          - {compiler: gcc, version: '14'}
          - {compiler: clang, version: '17'}
          - {compiler: clang, version: '18'}
          - {compiler: clang, version: '19'}
          - {compiler: clang, version: '20'}

    steps:

    - name: Install GCC
      if: matrix.sys.compiler == 'gcc'
      uses: egor-tensin/setup-gcc@v1
      with:
        version: ${{matrix.sys.version}}
        platform: x64

    - name: Install LLVM and Clang
      if: matrix.sys.compiler == 'clang'
      run: |
        wget https://apt.llvm.org/llvm.sh
        chmod +x llvm.sh
        sudo ./llvm.sh ${{matrix.sys.version}}
        sudo apt-get install -y clang-tools-${{matrix.sys.version}}
        sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{matrix.sys.version}} 200
        sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${{matrix.sys.version}} 200
        sudo update-alternatives --install /usr/bin/clang-scan-deps clang-scan-deps /usr/bin/clang-scan-deps-${{matrix.sys.version}} 200
        sudo update-alternatives --set clang /usr/bin/clang-${{matrix.sys.version}}
        sudo update-alternatives --set clang++ /usr/bin/clang++-${{matrix.sys.version}}
        sudo update-alternatives --set clang-scan-deps /usr/bin/clang-scan-deps-${{matrix.sys.version}}

    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set conda environment
      uses: mamba-org/setup-micromamba@v1
      with:
        environment-file: environment-dev.yml
        cache-environment: true

    - name: Configure using CMake
      # FIX: the original passed $(Build.SourcesDirectory), an Azure Pipelines
      # macro that is meaningless on GitHub Actions (bash treats it as a failed
      # command substitution). The checkout is the working directory, so '.'.
      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON .

    - name: Install
      working-directory: build
      run: cmake --install .

    - name: Build
      working-directory: build
      run: cmake --build . --target test_xtensor_python --parallel 8

    - name: Run tests (C++)
      working-directory: build/test
      run: ./test_xtensor_python

    - name: Run tests (Python)
      run: pytest -s

    - name: Example - readme 1
      working-directory: docs/source/examples/readme_example_1
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py

    - name: Example - copy 'cast'
      working-directory: docs/source/examples/copy_cast
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py

    - name: Example - SFINAE
      working-directory: docs/source/examples/sfinae
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py
================================================
FILE: .github/workflows/osx.yml
================================================
name: OSX

on:
  workflow_dispatch:
  pull_request:
  push:
    branches: [master]

concurrency:
  # One live run per workflow/job/ref; newer pushes cancel in-flight runs.
  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    # Login shell (-l) so the micromamba environment is activated in every step.
    shell: bash -e -l {0}

jobs:
  build:
    runs-on: macos-${{ matrix.os }}
    name: macos-${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - 14
          - 15

    steps:

    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set conda environment
      uses: mamba-org/setup-micromamba@v1
      with:
        environment-file: environment-dev.yml
        cache-environment: true

    - name: Configure using CMake
      # FIX: the original passed $(Build.SourcesDirectory), an Azure Pipelines
      # macro that is meaningless on GitHub Actions (bash treats it as a failed
      # command substitution). The checkout is the working directory, so '.'.
      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON .

    - name: Install
      working-directory: build
      run: cmake --install .

    - name: Build
      working-directory: build
      run: cmake --build . --target test_xtensor_python --parallel 8

    - name: Run tests (C++)
      working-directory: build/test
      run: ./test_xtensor_python

    - name: Run tests (Python)
      run: pytest -s

    - name: Example - readme 1
      working-directory: docs/source/examples/readme_example_1
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py

    - name: Example - copy 'cast'
      working-directory: docs/source/examples/copy_cast
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py

    - name: Example - SFINAE
      working-directory: docs/source/examples/sfinae
      run: |
        cmake -Bbuild -DPython_EXECUTABLE=`which python`
        cd build
        cmake --build .
        cp ../example.py .
        python example.py
================================================
FILE: .github/workflows/windows.yml
================================================
name: Windows

on:
  workflow_dispatch:
  pull_request:
  push:
    branches: [master]

concurrency:
  # One live run per workflow/job/ref; newer pushes cancel in-flight runs.
  group: ${{ github.workflow }}-${{ github.job }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    # Login shell (-l) so the micromamba environment is activated in every step.
    shell: bash -e -l {0}

jobs:
  build:
    # Single runner label: a scalar, not a one-element flow list.
    runs-on: windows-latest
    name: Windows

    steps:

    - name: Setup MSVC
      uses: ilammy/msvc-dev-cmd@v1

    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set conda environment
      uses: mamba-org/setup-micromamba@v1
      with:
        environment-file: environment-dev.yml
        cache-environment: true

    - name: Configure using CMake
      # FIX: the original passed $(Build.SourcesDirectory), an Azure Pipelines
      # macro that is meaningless on GitHub Actions (bash treats it as a failed
      # command substitution). The checkout is the working directory, so '.'.
      run: cmake -G Ninja -Bbuild -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DPYTHON_EXECUTABLE=`which python` -DDOWNLOAD_GTEST=ON .

    - name: Install
      working-directory: build
      run: cmake --install .

    - name: Build
      working-directory: build
      run: cmake --build . --target test_xtensor_python --parallel 8

    - name: Run tests (C++)
      working-directory: build/test
      run: ./test_xtensor_python

    - name: Run tests (Python)
      run: pytest -s
================================================
FILE: .gitignore
================================================
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# Vim tmp files
*.swp
# Build directory
build/
# Test build artefacts
test/test_xtensor_python
test/CMakeCache.txt
test/Makefile
test/CMakeFiles/
test/cmake_install.cmake
.pytest_cache/
# Documentation build artefacts
docs/CMakeCache.txt
docs/xml/
docs/build/
# Jupyter artefacts
.ipynb_checkpoints/
# Python
*.py[cod]
__pycache__
# NOTE(review): 'build' (no slash) duplicates 'build/' above and additionally
# matches plain files named 'build' — confirm before pruning either entry.
build
*.egg-info
# py.test
.cache/
FILE: CMakeLists.txt
================================================
############################################################################
# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay          #
# Copyright (c) QuantStack                                                 #
#                                                                          #
# Distributed under the terms of the BSD 3-Clause License.                 #
#                                                                          #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################

cmake_minimum_required(VERSION 3.29)
project(xtensor-python)

set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})
set(XTENSOR_PYTHON_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)

# Versioning
# ==========

# Parse MAJOR/MINOR/PATCH out of the config header so the version number is
# maintained in exactly one place.
set(XTENSOR_PYTHON_CONFIG_FILE
    "${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp")
file(STRINGS ${XTENSOR_PYTHON_CONFIG_FILE} xtensor_python_version_defines
     REGEX "#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH)")
foreach(ver ${xtensor_python_version_defines})
    if(ver MATCHES "#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$")
        set(XTENSOR_PYTHON_VERSION_${CMAKE_MATCH_1} "${CMAKE_MATCH_2}" CACHE INTERNAL "")
    endif()
endforeach()
set(${PROJECT_NAME}_VERSION
    ${XTENSOR_PYTHON_VERSION_MAJOR}.${XTENSOR_PYTHON_VERSION_MINOR}.${XTENSOR_PYTHON_VERSION_PATCH})
message(STATUS "xtensor-python v${${PROJECT_NAME}_VERSION}")

# Dependencies
# ============

set(xtensor_REQUIRED_VERSION 0.27.0)
if(TARGET xtensor)
    # xtensor is already part of this build (e.g. via add_subdirectory);
    # only verify its version against the requirement.
    set(xtensor_VERSION ${XTENSOR_VERSION_MAJOR}.${XTENSOR_VERSION_MINOR}.${XTENSOR_VERSION_PATCH})
    # Note: This is not SEMVER compatible comparison
    if(NOT ${xtensor_VERSION} VERSION_GREATER_EQUAL ${xtensor_REQUIRED_VERSION})
        # FIX: "ERROR" is not a valid message() mode keyword — the original
        # message(ERROR ...) merely printed the text and configuration carried
        # on with an incompatible xtensor. FATAL_ERROR stops configuration.
        message(FATAL_ERROR "Mismatch xtensor versions. Found '${xtensor_VERSION}' but requires: '${xtensor_REQUIRED_VERSION}'")
    else()
        message(STATUS "Found xtensor v${xtensor_VERSION}")
    endif()
else()
    find_package(xtensor ${xtensor_REQUIRED_VERSION} REQUIRED)
    message(STATUS "Found xtensor: ${xtensor_INCLUDE_DIRS}/xtensor")
endif()

find_package(Python COMPONENTS Interpreter REQUIRED)

set(pybind11_REQUIRED_VERSION 3.0.0)
if (NOT TARGET pybind11::headers)
    # Defaults to ON for cmake >= 3.18
    # https://github.com/pybind/pybind11/blob/35ff42b56e9d34d9a944266eb25f2c899dbdfed7/CMakeLists.txt#L96
    set(PYBIND11_FINDPYTHON OFF)
    find_package(pybind11 ${pybind11_REQUIRED_VERSION} REQUIRED)
    message(STATUS "Found pybind11: ${pybind11_INCLUDE_DIRS}/pybind11")
else ()
    # pybind11 has a variable that indicates its version already, so use that
    message(STATUS "Found pybind11 v${pybind11_VERSION}")
endif ()

# Look for NumPy headers, except if NUMPY_INCLUDE_DIRS is passed,
# which is required under some circumstances (such as wasm, where
# there is no real python executable)
if(NOT NUMPY_INCLUDE_DIRS)
    find_package(NumPy REQUIRED)
endif()
message(STATUS "Found numpy: ${NUMPY_INCLUDE_DIRS}")

# Build
# =====

set(XTENSOR_PYTHON_HEADERS
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyarray.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyarray_backstrides.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pycontainer.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pynative_casters.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pystrides_adaptor.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pytensor.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyvectorize.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp
    ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_type_caster_base.hpp
)

# Header-only: an INTERFACE target carrying include paths and the xtensor link.
add_library(xtensor-python INTERFACE)
target_include_directories(xtensor-python INTERFACE
    "$<BUILD_INTERFACE:${XTENSOR_PYTHON_INCLUDE_DIR};${pybind11_INCLUDE_DIRS};${NUMPY_INCLUDE_DIRS}>"
    $<INSTALL_INTERFACE:include>)
target_link_libraries(xtensor-python INTERFACE xtensor)
get_target_property(inc_dir xtensor-python INTERFACE_INCLUDE_DIRECTORIES)

OPTION(BUILD_TESTS "xtensor test suite" OFF)
OPTION(DOWNLOAD_GTEST "build gtest from downloaded sources" OFF)

# Requesting gtest (downloaded or local) implies building the tests.
if(DOWNLOAD_GTEST OR GTEST_SRC_DIR)
    set(BUILD_TESTS ON)
endif()

if(BUILD_TESTS)
    # Extension suffix used by the test/benchmark Python modules.
    if(MSVC)
        set(PYTHON_MODULE_EXTENSION ".pyd")
    else()
        set(PYTHON_MODULE_EXTENSION ".so")
    endif()
    add_subdirectory(test)
    add_subdirectory(benchmark)
endif()

# Installation
# ============

include(GNUInstallDirs)
include(CMakePackageConfigHelpers)

install(TARGETS xtensor-python
        EXPORT ${PROJECT_NAME}-targets)

# Makes the project importable from the build directory
export(EXPORT ${PROJECT_NAME}-targets
       FILE "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake")

install(FILES ${XTENSOR_PYTHON_HEADERS}
        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/xtensor-python)

configure_file(${PROJECT_NAME}.pc.in
               "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc"
               @ONLY)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc"
        DESTINATION "${CMAKE_INSTALL_DATADIR}/pkgconfig/")

set(XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR "${CMAKE_INSTALL_DATADIR}/cmake/${PROJECT_NAME}" CACHE
    STRING "install path for xtensor-pythonConfig.cmake")

configure_package_config_file(${PROJECT_NAME}Config.cmake.in
                              "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
                              INSTALL_DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})

# xtensor-python is header-only and does not depend on the architecture.
# Remove CMAKE_SIZEOF_VOID_P from xtensor-pythonConfigVersion.cmake so that an xtensor-pythonConfig.cmake
# generated for a 64 bit target can be used for 32 bit targets and vice versa.
set(_XTENSOR_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P})
unset(CMAKE_SIZEOF_VOID_P)
write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
                                 VERSION ${${PROJECT_NAME}_VERSION}
                                 COMPATIBILITY AnyNewerVersion)
set(CMAKE_SIZEOF_VOID_P ${_XTENSOR_CMAKE_SIZEOF_VOID_P})

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake
              ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake
        DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})
install(EXPORT ${PROJECT_NAME}-targets
        FILE ${PROJECT_NAME}Targets.cmake
        DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR})
================================================
FILE: LICENSE
================================================
Copyright (c) 2016, Wolf Vollprecht, Johan Mabille and Sylvain Corlay
Copyright (c) 2016, QuantStack
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: README.md
================================================
# xtensor-python
[](https://github.com/xtensor-stack/xtensor-python/actions/workflows/linux.yml)
[](https://github.com/xtensor-stack/xtensor-python/actions/workflows/osx.yml)
[](https://github.com/xtensor-stack/xtensor-python/actions/workflows/windows.yml)
[](https://xtensor-python.readthedocs.io/en/latest/?badge=latest)
[](https://xtensor.zulipchat.com/#narrow/channel/539553-Ask-anything)
Python bindings for the [xtensor](https://github.com/xtensor-stack/xtensor) C++ multi-dimensional array library.
- `xtensor` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing.
- `xtensor-python` enables inplace use of numpy arrays in C++ with all the benefits from `xtensor`
- C++ universal function and broadcasting
- STL - compliant APIs.
- A broad coverage of numpy APIs (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)).
The Python bindings for `xtensor` are based on the [pybind11](https://github.com/pybind/pybind11/) C++ library, which enables seamless interoperability between C++ and Python.
## Installation
`xtensor-python` is a header-only library. We provide a package for the mamba (or conda) package manager.
```bash
mamba install -c conda-forge xtensor-python
```
## Documentation
To get started with using `xtensor-python`, check out the full documentation
http://xtensor-python.readthedocs.io/
## Usage
xtensor-python offers two container types wrapping numpy arrays inplace to provide an xtensor semantics
- `pytensor`
- `pyarray`.
Both containers enable the numpy-style APIs of xtensor (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)).
- On the one hand, `pyarray` has a dynamic number of dimensions. Just like numpy arrays, it can be reshaped with a shape of a different length (and the new shape is reflected on the python side).
- On the other hand `pytensor` has a compile time number of dimensions, specified with a template parameter. Shapes of `pytensor` instances are stack allocated, making `pytensor` a significantly faster expression than `pyarray`.
### Example 1: Use an algorithm of the C++ standard library on a numpy array inplace.
**C++ code**
```cpp
#include <numeric> // Standard library import for std::accumulate
#include <pybind11/pybind11.h> // Pybind11 import to define Python bindings
#include <xtensor/core/xmath.hpp> // xtensor import for the C++ universal functions
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyarray.hpp> // Numpy bindings
double sum_of_sines(xt::pyarray<double>& m)
{
auto sines = xt::sin(m); // sines does not actually hold values.
return std::accumulate(sines.begin(), sines.end(), 0.0);
}
PYBIND11_MODULE(xtensor_python_test, m)
{
xt::import_numpy();
m.doc() = "Test module for xtensor python bindings";
m.def("sum_of_sines", sum_of_sines, "Sum the sines of the input values");
}
```
**Python Code**
```python
import numpy as np
import xtensor_python_test as xt
v = np.arange(15).reshape(3, 5)
s = xt.sum_of_sines(v)
print(s)
```
**Outputs**
```
1.2853996391883833
```
**Working example**
Get the working example here:
* [`CMakeLists.txt`](docs/source/examples/readme_example_1/CMakeLists.txt)
* [`main.cpp`](docs/source/examples/readme_example_1/main.cpp)
* [`example.py`](docs/source/examples/readme_example_1/example.py)
### Example 2: Create a universal function from a C++ scalar function
**C++ code**
```cpp
#include <pybind11/pybind11.h>
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyvectorize.hpp>
#include <numeric>
#include <cmath>
namespace py = pybind11;
double scalar_func(double i, double j)
{
return std::sin(i) - std::cos(j);
}
PYBIND11_MODULE(xtensor_python_test, m)
{
xt::import_numpy();
m.doc() = "Test module for xtensor python bindings";
m.def("vectorized_func", xt::pyvectorize(scalar_func), "");
}
```
**Python Code**
```python
import numpy as np
import xtensor_python_test as xt
x = np.arange(15).reshape(3, 5)
y = [1, 2, 3, 4, 5]
z = xt.vectorized_func(x, y)
print(z)
```
**Outputs**
```
[[-0.540302, 1.257618, 1.89929 , 0.794764, -1.040465],
[-1.499227, 0.136731, 1.646979, 1.643002, 0.128456],
[-1.084323, -0.583843, 0.45342 , 1.073811, 0.706945]]
```
## Installation
We provide a package for the conda package manager.
```bash
conda install -c conda-forge xtensor-python
```
This will pull the dependencies to xtensor-python, that is `pybind11` and `xtensor`.
## Project cookiecutter
A template for a project making use of `xtensor-python` is available in the form of a cookiecutter [here](https://github.com/xtensor-stack/xtensor-python-cookiecutter).
This project is meant to help library authors get started with the xtensor python bindings.
It produces a project following the best practices for the packaging and distribution of Python extensions based on `xtensor-python`, including a `setup.py` file and a conda recipe.
## Building and Running the Tests
Testing `xtensor-python` requires `pytest`
``` bash
py.test .
```
To pick up changes in `xtensor-python` while rebuilding, delete the `build/` directory.
## Building the HTML Documentation
`xtensor-python`'s documentation is built with three tools
- [doxygen](http://www.doxygen.org)
- [sphinx](http://www.sphinx-doc.org)
- [breathe](https://breathe.readthedocs.io)
While doxygen must be installed separately, you can install breathe by typing
```bash
pip install breathe
```
Breathe can also be installed with `conda`
```bash
conda install -c conda-forge breathe
```
Finally, build the documentation with
```bash
make html
```
from the `docs` subdirectory.
## Dependencies on `xtensor` and `pybind11`
`xtensor-python` depends on the `xtensor` and `pybind11` libraries
| `xtensor-python` | `xtensor` | `pybind11` |
|------------------|-----------|------------------|
| master | ^0.27.0 | >=2.6.1,<4 |
| 0.29.0 | ^0.27.0 | >=2.6.1,<4 |
| 0.28.0 | ^0.26.0 | >=2.6.1,<3 |
| 0.27.0 | ^0.25.0 | >=2.6.1,<3 |
| 0.26.1 | ^0.24.0 | ~2.4.3 |
| 0.26.0 | ^0.24.0 | ~2.4.3 |
| 0.25.3 | ^0.23.0 | ~2.4.3 |
| 0.25.2 | ^0.23.0 | ~2.4.3 |
| 0.25.1 | ^0.23.0 | ~2.4.3 |
| 0.25.0 | ^0.23.0 | ~2.4.3 |
| 0.24.1 | ^0.21.2 | ~2.4.3 |
| 0.24.0 | ^0.21.1 | ~2.4.3 |
These dependencies are automatically resolved when using the conda package manager.
## License
We use a shared copyright model that enables all contributors to maintain the
copyright on their contributions.
This software is licensed under the BSD-3-Clause license. See the [LICENSE](LICENSE) file for details.
================================================
FILE: benchmark/CMakeLists.txt
================================================
############################################################################
# Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     #
#                                                                          #
# Distributed under the terms of the BSD 3-Clause License.                 #
#                                                                          #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################

# Standalone-build support: when this directory is configured as the
# top-level project, locate an installed xtensor-python instead of relying
# on the parent build tree.
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
    project(xtensor-python-benchmark)

    find_package(xtensor-python REQUIRED CONFIG)
    set(XTENSOR_PYTHON_INCLUDE_DIR ${xtensor-python_INCLUDE_DIRS})
endif ()

# Benchmarks are only meaningful with optimizations on.
message(STATUS "Forcing tests build type to Release")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE)

include(CheckCXXCompilerFlag)

string(TOUPPER "${CMAKE_BUILD_TYPE}" U_CMAKE_BUILD_TYPE)

if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel")
    # NOTE(review): -march=native ties the binary to the build host's CPU —
    # expected for a local benchmark, not for distribution.
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -Wunused-parameter -Wextra -Wreorder -Wconversion")
    CHECK_CXX_COMPILER_FLAG("-std=c++14" HAS_CPP14_FLAG)

    if (HAS_CPP14_FLAG)
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
    else()
        message(FATAL_ERROR "Unsupported compiler -- xtensor requires C++14 support!")
    endif()

    # Enable link time optimization and set the default symbol
    # visibility to hidden (very important to obtain small binaries)
    if (NOT ${U_CMAKE_BUILD_TYPE} MATCHES DEBUG)
        # Default symbol visibility
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")

        # Check for Link Time Optimization support
        # (GCC/Clang)
        CHECK_CXX_COMPILER_FLAG("-flto" HAS_LTO_FLAG)
        if (HAS_LTO_FLAG)
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto")
        endif()

        # Intel equivalent to LTO is called IPO
        if (CMAKE_CXX_COMPILER_ID MATCHES "Intel")
            CHECK_CXX_COMPILER_FLAG("-ipo" HAS_IPO_FLAG)
            if (HAS_IPO_FLAG)
                set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ipo")
            endif()
        endif()
    endif()
endif()

if(MSVC)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /bigobj")
    set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO)
    # Switch every configuration from the DLL runtime (/MD) to the static
    # runtime (-MT).
    foreach(flag_var
            CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
            CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
        string(REPLACE "/MD" "-MT" ${flag_var} "${${flag_var}}")
    endforeach()
endif()

set(XTENSOR_PYTHON_BENCHMARK
    main.cpp
)

set(XTENSOR_PYTHON_BENCHMARK_TARGET benchmark_xtensor_python)

# Build the benchmark Python extension module; excluded from 'all' so it is
# only built when the xbenchmark target is requested.
add_library(${XTENSOR_PYTHON_BENCHMARK_TARGET} MODULE EXCLUDE_FROM_ALL
    ${XTENSOR_PYTHON_BENCHMARK} ${XTENSOR_PYTHON_HEADERS})
# No 'lib' prefix and a Python-specific suffix so the interpreter can import it.
set_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES PREFIX "")
set_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES SUFFIX "${PYTHON_MODULE_EXTENSION}")

if (APPLE)
    # Resolve Python symbols at load time rather than linking libpython.
    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} PRIVATE "-undefined dynamic_lookup")
elseif (MSVC)
    # NOTE(review): this branch uses the plain (non-keyword) signature and links
    # only ${PYTHON_LIBRARIES}, not xtensor-python — presumably the include
    # paths come from the parent scope; verify this is intentional.
    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} ${PYTHON_LIBRARIES})
else ()
    target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} PRIVATE xtensor-python)
endif()

# Copy the benchmark driver scripts next to the built module.
configure_file(benchmark_pyarray.py benchmark_pyarray.py COPYONLY)
configure_file(benchmark_pytensor.py benchmark_pytensor.py COPYONLY)
configure_file(benchmark_pybind_array.py benchmark_pybind_array.py COPYONLY)
configure_file(benchmark_pyvectorize.py benchmark_pyvectorize.py COPYONLY)
configure_file(benchmark_pybind_vectorize.py benchmark_pybind_vectorize.py COPYONLY)

# 'cmake --build . --target xbenchmark' builds the module then runs every driver.
add_custom_target(xbenchmark
    COMMAND "${PYTHON_EXECUTABLE}" "benchmark_pyarray.py"
    COMMAND "${PYTHON_EXECUTABLE}" "benchmark_pytensor.py"
    COMMAND "${PYTHON_EXECUTABLE}" "benchmark_pybind_array.py"
    COMMAND "${PYTHON_EXECUTABLE}" "benchmark_pyvectorize.py"
    COMMAND "${PYTHON_EXECUTABLE}" "benchmark_pybind_vectorize.py"
    DEPENDS ${XTENSOR_PYTHON_BENCHMARK_TARGET})
================================================
FILE: benchmark/benchmark_pyarray.py
================================================
"""Time summing a 1M-element float array through the pyarray binding."""
from timeit import timeit

import numpy as np

from benchmark_xtensor_python import sum_array

# Referenced by name from the timeit setup statement below.
u = np.ones(1000000, dtype=float)

print(timeit('sum_array(u)', setup='from __main__ import u, sum_array', number=1000))
================================================
FILE: benchmark/benchmark_pybind_array.py
================================================
"""Time summing a 1M-element float array through the raw pybind11 binding."""
from timeit import timeit

import numpy as np

from benchmark_xtensor_python import pybind_sum_array

# Referenced by name from the timeit setup statement below.
u = np.ones(1000000, dtype=float)

print(timeit('pybind_sum_array(u)', setup='from __main__ import u, pybind_sum_array', number=1000))
================================================
FILE: benchmark/benchmark_pybind_vectorize.py
================================================
"""Time complex magnitude over a strided view via the pybind11 vectorize path."""
from timeit import timeit

import numpy as np

from benchmark_xtensor_python import pybind_rect_to_polar

# Referenced by name from the timeit setup statement below; the benchmark
# feeds a strided view (w[::2]) to exercise non-contiguous input.
w = np.ones(100000, dtype=complex)

print(timeit('pybind_rect_to_polar(w[::2])', 'from __main__ import w, pybind_rect_to_polar', number=1000))
================================================
FILE: benchmark/benchmark_pytensor.py
================================================
"""Time summing a 1M-element float array through the pytensor binding."""
from timeit import timeit

import numpy as np

from benchmark_xtensor_python import sum_tensor

# Referenced by name from the timeit setup statement below.
u = np.ones(1000000, dtype=float)

# print(sum_tensor(u))  # one-off correctness check, kept disabled
print(timeit('sum_tensor(u)', setup='from __main__ import u, sum_tensor', number=1000))
================================================
FILE: benchmark/benchmark_pyvectorize.py
================================================
"""Time complex magnitude over a strided view via the pyvectorize path."""
from timeit import timeit

import numpy as np

from benchmark_xtensor_python import rect_to_polar

# Referenced by name from the timeit setup statement below; the benchmark
# feeds a strided view (w[::2]) to exercise non-contiguous input.
w = np.ones(100000, dtype=complex)

print(timeit('rect_to_polar(w[::2])', 'from __main__ import w, rect_to_polar', number=1000))
================================================
FILE: benchmark/main.cpp
================================================
#include "pybind11/pybind11.h"
#include "pybind11/numpy.h"

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/arrayobject.h"

#include "xtensor/containers/xtensor.hpp"
#include "xtensor/containers/xarray.hpp"

#include "xtensor-python/pyarray.hpp"
#include "xtensor-python/pytensor.hpp"
#include "xtensor-python/pyvectorize.hpp"

using complex_t = std::complex<double>;

namespace py = pybind11;

// Benchmark bindings comparing xtensor-python containers against their raw
// pybind11 counterparts; driven by the Python scripts in this directory.
PYBIND11_MODULE(benchmark_xtensor_python, m)
{
    // Initialize the NumPy C API before any array machinery is used.
    if (_import_array() < 0)
    {
        PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
    }

    m.doc() = "Benchmark module for xtensor python bindings";

    // Sum via pyarray element iteration (dynamic-rank xtensor-python path).
    m.def("sum_array", [](xt::pyarray<double> const& values) {
        double total = 0;
        for (auto const& v : values)
        {
            total += v;
        }
        return total;
    });

    // Sum via pytensor element iteration (fixed-rank xtensor-python path).
    m.def("sum_tensor", [](xt::pytensor<double, 1> const& values) {
        double total = 0;
        for (auto const& v : values)
        {
            total += v;
        }
        return total;
    });

    // Sum over the raw buffer exposed by pybind11 (baseline for comparison).
    m.def("pybind_sum_array", [](py::array_t<double> const& values) {
        double total = 0;
        size_t count = values.size();
        const double* buffer = values.data(0);
        for (size_t i = 0; i < count; ++i)
        {
            total += buffer[i];
        }
        return total;
    });

    // Complex magnitude through py::vectorize, taking a pyarray input.
    m.def("rect_to_polar", [](xt::pyarray<complex_t> const& values) {
        return py::vectorize([](complex_t z) { return std::abs(z); })(values);
    });

    // Complex magnitude through py::vectorize on an untyped pybind11 array;
    // rejects any dtype other than complex double.
    m.def("pybind_rect_to_polar", [](py::array values) {
        if (py::isinstance<py::array_t<complex_t>>(values))
            return py::vectorize([](complex_t z) { return std::abs(z); })(values);
        else
            throw py::type_error("rect_to_polar unhandled type");
    });
}
================================================
FILE: benchmark/setup.py
================================================
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import os
import setuptools
__version__ = '0.0.1'
class get_pybind_include(object):
    """Lazy path-like helper for the pybind11 include directory.

    Importing pybind11 is postponed until ``__str__`` is evaluated (i.e.
    when the compiler actually needs the path), so setup.py can be parsed
    before pybind11 has been installed.
    """

    def __init__(self, user=False):
        # user=True requests the per-user header location.
        self.user = user

    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)
class get_numpy_include(object):
    """Lazy path-like helper for the numpy include directory.

    Importing numpy is postponed until ``__str__`` is evaluated, so setup.py
    can be parsed before numpy has been installed.
    """

    def __str__(self):
        import numpy as _np
        return _np.get_include()
# Single extension module built from main.cpp; include directories use the
# lazy helpers above so pybind11/numpy need not be importable at parse time.
ext_modules = [
    Extension(
        'benchmark_xtensor_python',
        ['main.cpp'],
        include_dirs=[
            # Path to pybind11 headers
            get_pybind_include(),
            get_pybind_include(user=True),
            # Path to numpy headers
            get_numpy_include(),
            # Environment-prefix include paths (the 'Library' variant is the
            # conda layout on Windows).
            os.path.join(sys.prefix, 'include'),
            os.path.join(sys.prefix, 'Library', 'include')
        ],
        language='c++'
    ),
]
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.

    A minimal C++ source is written to a temporary file and compiled with
    ``flagname``; a CompileError means the flag is unsupported.
    """
    import os
    import tempfile
    # FIX: write with delete=False and close the file before compiling.
    # The original compiled f.name while the file was still open, which
    # fails on Windows where an open file cannot be reopened by the compiler.
    with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:
        f.write('int main (int argc, char **argv) { return 0; }')
        fname = f.name
    try:
        compiler.compile([fname], extra_postargs=[flagname])
    except setuptools.distutils.errors.CompileError:
        return False
    finally:
        # Best-effort cleanup of the probe source (object files land in the
        # compiler's output dir and are left to normal build cleanup).
        try:
            os.remove(fname)
        except OSError:
            pass
    return True
def cpp_flag(compiler):
    """Return the ``-std=c++14`` compiler flag, raising RuntimeError when
    the flag is not available.
    """
    # Guard clause: fail fast when the probe rejects the flag.
    if not has_flag(compiler, '-std=c++14'):
        raise RuntimeError('C++14 support is required by xtensor!')
    return '-std=c++14'
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""

    # Base options per compiler family; shared by every instance.
    c_opts = {
        'msvc': ['/EHsc'],
        'unix': [],
    }

    if sys.platform == 'darwin':
        # libc++ and a minimum macOS target are required for modern C++.
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']

    def build_extensions(self):
        ct = self.compiler.compiler_type
        # Copy the class-level list: the original code appended to the
        # shared `c_opts[ct]` list directly, so calling this method more
        # than once accumulated duplicate flags for every later build.
        opts = list(self.c_opts.get(ct, []))
        if ct == 'unix':
            opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
            opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, '-fvisibility=hidden'):
                opts.append('-fvisibility=hidden')
        elif ct == 'msvc':
            opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
        for ext in self.extensions:
            ext.extra_compile_args = opts
        build_ext.build_extensions(self)
# Standard setuptools entry point; BuildExt injects the C++14 and
# visibility flags at build time.
# NOTE(review): `url` still points at pybind/python_example, the
# template this file was derived from — consider pointing it at the
# xtensor-python repository.
setup(
    name='benchmark_xtensor_python',
    version=__version__,
    author='Sylvain Corlay',
    author_email='sylvain.corlay@gmail.com',
    url='https://github.com/pybind/python_example',
    description='An example project using xtensor-python',
    long_description='',
    ext_modules=ext_modules,
    install_requires=['pybind11>=2.2.1'],
    cmdclass={'build_ext': BuildExt},
    zip_safe=False,
)
================================================
FILE: cmake/FindNumPy.cmake
================================================
# - Find the NumPy libraries
# This module finds if NumPy is installed, and sets the following variables
# indicating where it is.
#
# TODO: Update to provide the libraries and paths for linking npymath lib.
#
# NUMPY_FOUND - was NumPy found
# NUMPY_VERSION - the version of NumPy found as a string
# NUMPY_VERSION_MAJOR - the major version number of NumPy
# NUMPY_VERSION_MINOR - the minor version number of NumPy
# NUMPY_VERSION_PATCH - the patch version number of NumPy
# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601
# NUMPY_INCLUDE_DIRS - path to the NumPy include files
#============================================================================
# Copyright 2012 Continuum Analytics, Inc.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#============================================================================
# Finding NumPy involves calling the Python interpreter
if(NumPy_FIND_REQUIRED)
    find_package(Python COMPONENTS Interpreter REQUIRED)
else()
    find_package(Python COMPONENTS Interpreter)
endif()

# find_package(Python ...) sets Python_Interpreter_FOUND and
# Python_EXECUTABLE.  The legacy PYTHONINTERP_FOUND / PYTHON_EXECUTABLE
# names checked previously are only defined by the deprecated
# FindPythonInterp module, so the old checks never matched.
if(NOT Python_Interpreter_FOUND)
    set(NUMPY_FOUND FALSE)
    return()
endif()

execute_process(COMMAND "${Python_EXECUTABLE}" "-c"
    "import numpy as n; print(n.__version__); print(n.get_include());"
    RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS
    OUTPUT_VARIABLE _NUMPY_VALUES
    ERROR_VARIABLE _NUMPY_ERROR_VALUE
    OUTPUT_STRIP_TRAILING_WHITESPACE)

if(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0)
    if(NumPy_FIND_REQUIRED)
        message(FATAL_ERROR
            "NumPy import failure:\n${_NUMPY_ERROR_VALUE}")
    endif()
    set(NUMPY_FOUND FALSE)
    # _NUMPY_VALUES is empty here; the parsing below would hard-error.
    return()
endif()

# Convert the process output into a list
string(REGEX REPLACE ";" "\\\\;" _NUMPY_VALUES ${_NUMPY_VALUES})
string(REGEX REPLACE "\n" ";" _NUMPY_VALUES ${_NUMPY_VALUES})
list(GET _NUMPY_VALUES 0 NUMPY_VERSION)
list(GET _NUMPY_VALUES 1 NUMPY_INCLUDE_DIRS)

# Make sure all directory separators are '/'
string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS})

# Get the major, minor and patch version numbers
string(REGEX REPLACE "\\." ";" _NUMPY_VERSION_LIST ${NUMPY_VERSION})
list(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR)
list(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR)
list(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH)
# Strip any pre-release suffix (e.g. "0rc1") from the patch component.
string(REGEX MATCH "[0-9]*" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH})
math(EXPR NUMPY_VERSION_DECIMAL
    "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")

find_package_message(NUMPY
    "Found NumPy: version \"${NUMPY_VERSION}\" ${NUMPY_INCLUDE_DIRS}"
    "${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}")

set(NUMPY_FOUND TRUE)
================================================
FILE: docs/Doxyfile
================================================
# Doxygen configuration for xtensor-python.
# Only XML output is generated; it is consumed by breathe to build the
# Sphinx API documentation (see docs/source/conf.py).
PROJECT_NAME = "xtensor-python"
XML_OUTPUT = xml
INPUT = ../include
GENERATE_LATEX = NO
GENERATE_MAN = NO
GENERATE_RTF = NO
CASE_SENSE_NAMES = NO
GENERATE_HTML = NO
GENERATE_XML = YES
RECURSIVE = YES
QUIET = YES
JAVADOC_AUTOBRIEF = YES
================================================
FILE: docs/Makefile
================================================
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# Every target here writes under $(BUILDDIR) and never creates a file
# named after itself, so all of them must be declared phony.  The
# original list omitted default, latexpdfja, texinfo, info, xml and
# pseudoxml, which do exist below.
.PHONY: default help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf latexpdfja text man texinfo info changes linkcheck doctest coverage gettext xml pseudoxml api
default: html
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " coverage to run coverage check of the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
rm -rf xml
html:
doxygen
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
doxygen
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
doxygen
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
doxygen
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
doxygen
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
doxygen
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
epub:
doxygen
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
doxygen
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
doxygen
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
doxygen
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
doxygen
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
doxygen
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
doxygen
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
doxygen
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
doxygen
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
doxygen
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
doxygen
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
doxygen
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
coverage:
doxygen
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
@echo "Testing of coverage in the sources finished, look at the " \
"results in $(BUILDDIR)/coverage/python.txt."
xml:
doxygen
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
doxygen
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
================================================
FILE: docs/environment.yml
================================================
---
# Conda environment used to build the xtensor-python documentation.
name: xtensor-python-docs
channels:
  - conda-forge
dependencies:
  - breathe  # Doxygen-XML to Sphinx bridge; pulls in sphinx itself
  - sphinx_rtd_theme
================================================
FILE: docs/make.bat
================================================
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
set I18NSPHINXOPTS=%SPHINXOPTS% source
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
echo. coverage to run coverage check of the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
REM Fall back to running Sphinx as a module.  "python -m sphinx" is the
REM documented entry point; "-m sphinx.__init__" relied on an
REM implementation detail of the package layout.
set SPHINXBUILD=python -m sphinx
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
:sphinx_ok
if "%1" == "html" (
doxygen
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\packagename.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\packagename.ghc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "coverage" (
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
if errorlevel 1 exit /b 1
echo.
echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end
================================================
FILE: docs/source/_static/main_stylesheet.css
================================================
.wy-nav-content{
max-width: 1000px;
margin: auto;
}
================================================
FILE: docs/source/api_reference.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
API reference
=============
Containers
----------
.. toctree::
:maxdepth: 2
pyarray
pytensor
Numpy universal functions
-------------------------
.. toctree::
:maxdepth: 2
pyvectorize
================================================
FILE: docs/source/array_tensor.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Arrays and tensors
==================
``xtensor-python`` provides two container types wrapping numpy arrays: ``pyarray`` and ``pytensor``. They are the counterparts
to ``xarray`` and ``xtensor`` containers.
pyarray
-------
Like ``xarray``, ``pyarray`` has a dynamic shape. This means that you can reshape the numpy array on the C++ side and see this
change reflected on the python side. ``pyarray`` doesn't make a copy of the shape or the strides, but reads them each time it
is needed. Therefore, if a reference on a ``pyarray`` is kept in the C++ code and the corresponding numpy array is then reshaped
in the python code, this modification will reflect in the ``pyarray``.
pytensor
--------
Like ``xtensor``, ``pytensor`` has a static stack-allocated shape. This means that the shape of the numpy array is copied into
the shape of the ``pytensor`` upon creation. As a consequence, reshapes are not reflected across languages. However, this drawback
is offset by a more effective computation of shape and broadcast.
================================================
FILE: docs/source/basic_usage.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Basic Usage
===========
Example 1: Use an algorithm of the C++ library on a numpy array inplace
-----------------------------------------------------------------------
**C++ code**
.. code::
#include <numeric> // Standard library import for std::accumulate
#include "pybind11/pybind11.h" // Pybind11 import to define Python bindings
#include "xtensor/core/xmath.hpp" // xtensor import for the C++ universal functions
#define FORCE_IMPORT_ARRAY // numpy C api loading
#include "xtensor-python/pyarray.hpp" // Numpy bindings
double sum_of_sines(xt::pyarray<double>& m)
{
auto sines = xt::sin(m); // sines does not actually hold values.
return std::accumulate(sines.cbegin(), sines.cend(), 0.0);
}
PYBIND11_MODULE(xtensor_python_test, m)
{
xt::import_numpy();
m.doc() = "Test module for xtensor python bindings";
m.def("sum_of_sines", sum_of_sines, "Sum the sines of the input values");
}
**Python code:**
.. code::
import numpy as np
import xtensor_python_test as xt
a = np.arange(15).reshape(3, 5)
s = xt.sum_of_sines(a)
s
**Outputs**
.. code::
1.2853996391883833
Example 2: Create a numpy-style universal function from a C++ scalar function
-----------------------------------------------------------------------------
**C++ code**
.. code::
#include "pybind11/pybind11.h"
#define FORCE_IMPORT_ARRAY
#include "xtensor-python/pyvectorize.hpp"
#include <numeric>
#include <cmath>
namespace py = pybind11;
double scalar_func(double i, double j)
{
return std::sin(i) - std::cos(j);
}
PYBIND11_MODULE(xtensor_python_test, m)
{
xt::import_numpy();
m.doc() = "Test module for xtensor python bindings";
m.def("vectorized_func", xt::pyvectorize(scalar_func), "");
}
**Python code:**
.. code::
import numpy as np
import xtensor_python_test as xt
x = np.arange(15).reshape(3, 5)
y = [1, 2, 3, 4, 5]
z = xt.vectorized_func(x, y)
z
**Outputs**
.. code::
[[-0.540302, 1.257618, 1.89929 , 0.794764, -1.040465],
[-1.499227, 0.136731, 1.646979, 1.643002, 0.128456],
[-1.084323, -0.583843, 0.45342 , 1.073811, 0.706945]]
================================================
FILE: docs/source/compilers.rst
================================================
.. Copyright (c) 2016, Johan Mabille, Sylvain Corlay and Wolf Vollprecht
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Compiler workarounds
====================
This page tracks the workarounds for the various compiler issues that we
encountered in the development. This is mostly of interest for developers
interested in contributing to xtensor-python.
GCC and ``std::allocator<long long>``
-------------------------------------
GCC sometimes fails to automatically instantiate the ``std::allocator``
class template for the types ``long long`` and ``unsigned long long``.
Those allocators are thus explicitly instantiated in the dummy function
``void long_long_allocator()`` in the file ``pycontainer.hpp``.
================================================
FILE: docs/source/conf.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Sphinx configuration for the xtensor-python documentation.
import os
import subprocess

# On Read the Docs the Doxygen XML is not pre-built by the Makefile, so
# generate it here before Sphinx runs.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    subprocess.call('cd ..; doxygen', shell=True)

import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

def setup(app):
    # Inject the custom stylesheet that widens the RTD content column.
    app.add_css_file("main_stylesheet.css")

extensions = ['breathe', 'sphinx_rtd_theme']
# breathe reads the Doxygen XML emitted into docs/xml (see Doxyfile).
breathe_projects = { 'xtensor-python': '../xml' }
templates_path = ['_templates']
html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
project = 'xtensor-python'
copyright = '2016, Johan Mabille and Sylvain Corlay'
author = 'Johan Mabille and Sylvain Corlay'
# NOTE(review): quantstack-white.svg is not under _static in this tree —
# confirm the logo file actually ships with the docs.
html_logo = 'quantstack-white.svg'
exclude_patterns = []
highlight_language = 'c++'
pygments_style = 'sphinx'
todo_include_todos = False
htmlhelp_basename = 'xtensorpythondoc'
================================================
FILE: docs/source/cookiecutter.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Getting started with xtensor-python-cookiecutter
================================================
`xtensor-python-cookiecutter`_ helps extension authors create Python extension modules making use of xtensor.
It takes care of the initial work of generating a project skeleton with
- A complete ``setup.py`` compiling the extension module
- A few examples included in the resulting project including
- A universal function defined from C++
- A function making use of an algorithm from the STL on a numpy array
- Unit tests
- The generation of the HTML documentation with sphinx
Usage
-----
Install cookiecutter_
.. code::
pip install cookiecutter
After installing cookiecutter, use the `xtensor-python-cookiecutter`_:
.. code::
cookiecutter https://github.com/xtensor-stack/xtensor-python-cookiecutter.git
As xtensor-python-cookiecutter runs, you will be asked for basic information about
your custom extension project. You will be prompted for the following
information:
- ``author_name``: your name or the name of your organization,
- ``author_email`` : your project's contact email,
- ``github_project_name``: name of the GitHub repository for your project,
- ``github_organization_name``: name of the GitHub organization for your project,
- ``python_package_name``: name of the Python package created by your extension,
- ``cpp_namespace``: name for the cpp namespace holding the implementation of your extension,
- ``project_short_description``: a short description for your project.
This will produce a directory containing all the required content for a minimal extension
project making use of xtensor with all the required boilerplate for package management,
together with a few basic examples.
.. _xtensor-python-cookiecutter: https://github.com/xtensor-stack/xtensor-python-cookiecutter
.. _cookiecutter: https://github.com/audreyr/cookiecutter
================================================
FILE: docs/source/dev_build_options.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Build, test and benchmark
=========================
``xtensor-python`` build supports the following options:
- ``BUILD_TESTS``: enables the ``xtest`` and ``xbenchmark`` targets (see below).
- ``DOWNLOAD_GTEST``: downloads ``gtest`` and builds it locally instead of using a binary installation.
- ``GTEST_SRC_DIR``: indicates where to find the ``gtest`` sources instead of downloading them.
All these options are disabled by default. Enabling ``DOWNLOAD_GTEST`` or
setting ``GTEST_SRC_DIR`` enables ``BUILD_TESTS``.
If the ``BUILD_TESTS`` option is enabled, the following targets are available:
- xtest: builds and runs the test suite.
- xbenchmark: builds and runs the benchmarks.
For instance, building the test suite of ``xtensor-python`` and downloading ``gtest`` automatically:
.. code::
mkdir build
cd build
cmake -DDOWNLOAD_GTEST=ON ../
make xtest
To run the benchmark:
.. code::
make xbenchmark
To test the Python bindings:
.. code::
cd ..
pytest -s
================================================
FILE: docs/source/examples/copy_cast/CMakeLists.txt
================================================
# Build the `mymodule` pybind11 extension for the copy_cast example.
cmake_minimum_required(VERSION 3.29)
project(mymodule)
find_package(pybind11 CONFIG REQUIRED)
find_package(xtensor REQUIRED)
find_package(xtensor-python REQUIRED)
# NumPy headers are required because xtensor-python uses the numpy C API.
find_package(Python REQUIRED COMPONENTS NumPy)
pybind11_add_module(mymodule main.cpp)
target_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)
target_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)
================================================
FILE: docs/source/examples/copy_cast/example.py
================================================
# Sanity check for the copy_cast example module: both bindings must
# agree with numpy's own sin/cos reductions.
import mymodule
import numpy as np

c = np.array([[1, 2, 3], [4, 5, 6]])
assert np.isclose(np.sum(np.sin(c)), mymodule.sum_of_sines(c))
assert np.isclose(np.sum(np.cos(c)), mymodule.sum_of_cosines(c))
================================================
FILE: docs/source/examples/copy_cast/main.cpp
================================================
#include <numeric>
#include <xtensor.hpp>
#include <pybind11/pybind11.h>
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyarray.hpp>
// Return the sum of sin(x) over every element of `m`.
// `T` may be any xtensor expression type (e.g. xt::pyarray<double>,
// which wraps the numpy buffer without copying).  xt::sin builds a lazy
// expression; values are only computed while std::accumulate iterates.
template <class T>
double sum_of_sines(T& m)
{
    auto sines = xt::sin(m); // sines does not actually hold values.
    return std::accumulate(sines.begin(), sines.end(), 0.0);
}
// Return the sum of cos(x) over every element of `m`.
// Taking const xt::xarray<double>& means the Python binding copies and
// casts the incoming numpy array into a temporary xarray, so in the
// Python API `m` is a reference to a temporary — unlike the pyarray
// overload above, which wraps the numpy buffer in place.
double sum_of_cosines(const xt::xarray<double>& m)
{
    auto cosines = xt::cos(m); // cosines does not actually hold values.
    return std::accumulate(cosines.begin(), cosines.end(), 0.0);
}
// Python module definition.  xt::import_numpy() must run before any
// pyarray/pytensor is used: it loads the numpy C API (enabled here by
// the FORCE_IMPORT_ARRAY define above the pyarray include).
PYBIND11_MODULE(mymodule, m)
{
    xt::import_numpy();
    m.doc() = "Test module for xtensor python bindings";
    m.def("sum_of_sines", sum_of_sines<xt::pyarray<double>>, "Sum the sines of the input values");
    m.def("sum_of_cosines", sum_of_cosines, "Sum the cosines of the input values");
}
================================================
FILE: docs/source/examples/readme_example_1/CMakeLists.txt
================================================
# Build the `mymodule` pybind11 extension for the readme example.
cmake_minimum_required(VERSION 3.29)
project(mymodule)
# NumPy headers are required because xtensor-python uses the numpy C API.
find_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)
find_package(pybind11 REQUIRED CONFIG)
find_package(xtensor REQUIRED)
find_package(xtensor-python REQUIRED)
pybind11_add_module(mymodule main.cpp)
target_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)
target_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)
================================================
FILE: docs/source/examples/readme_example_1/example.py
================================================
# Sanity check for the readme example module: the binding must agree
# with numpy's own sin reduction.
import mymodule
import numpy as np

a = np.array([1, 2, 3])
assert np.isclose(np.sum(np.sin(a)), mymodule.sum_of_sines(a))
================================================
FILE: docs/source/examples/readme_example_1/main.cpp
================================================
#include <numeric>
#include <xtensor.hpp>
#include <pybind11/pybind11.h>
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyarray.hpp>
// Return the sum of sin(x) over every element of `m`.
// xt::sin builds a lazy expression over the wrapped numpy buffer;
// values are only computed while std::accumulate iterates.
double sum_of_sines(xt::pyarray<double>& m)
{
    auto sines = xt::sin(m); // sines does not actually hold values.
    return std::accumulate(sines.begin(), sines.end(), 0.0);
}
// Python module definition; xt::import_numpy() loads the numpy C API
// and must run before any pyarray is used.
PYBIND11_MODULE(mymodule, m)
{
    xt::import_numpy();
    m.doc() = "Test module for xtensor python bindings";
    m.def("sum_of_sines", sum_of_sines, "Sum the sines of the input values");
}
================================================
FILE: docs/source/examples/sfinae/CMakeLists.txt
================================================
# Build both artifacts of the sfinae example: the `mymodule` pybind11
# extension (python.cpp) and a plain native executable (main.cpp).
cmake_minimum_required(VERSION 3.29)
project(mymodule)
find_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)
find_package(pybind11 REQUIRED CONFIG)
find_package(xtensor REQUIRED)
find_package(xtensor-python REQUIRED)
pybind11_add_module(mymodule python.cpp)
target_link_libraries(mymodule PUBLIC pybind11::module xtensor-python Python::NumPy)
target_compile_definitions(mymodule PRIVATE VERSION_INFO=0.1.0)
# Native driver sharing mymodule.hpp; only needs xtensor, not Python.
add_executable(myexec main.cpp)
target_link_libraries(myexec PUBLIC xtensor)
================================================
FILE: docs/source/examples/sfinae/example.py
================================================
"""Check that times_dimension doubles a 2-D array in place."""
import mymodule
import numpy as np

original = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
work = np.array(original, copy=True)
mymodule.times_dimension(work)  # modifies `work` in place
assert np.allclose(2 * original, work)
================================================
FILE: docs/source/examples/sfinae/main.cpp
================================================
#include "mymodule.hpp"
#include <xtensor/io/xio.hpp>
// Call the generic times_dimension() directly from C++ on an xt::xtensor:
// every element is multiplied by the expression's dimension (2 here).
int main()
{
    xt::xtensor<size_t, 2> a = xt::arange<size_t>(2 * 3).reshape({2, 3});
    mymodule::times_dimension(a); // in place: a *= a.dimension()
    std::cout << a << std::endl;
    return 0;
}
================================================
FILE: docs/source/examples/sfinae/mymodule.hpp
================================================
#pragma once // header is included by both main.cpp and python.cpp

#include <type_traits> // std::enable_if_t
#include <vector>      // std::vector (do not rely on transitive includes)

#include <xtensor/containers/xtensor.hpp>

namespace mymodule {

// Trait: true iff T is a std::vector<...>.
template <class T>
struct is_std_vector
{
    static const bool value = false;
};

template <class T>
struct is_std_vector<std::vector<T> >
{
    static const bool value = true;
};

// Overload for any xtensor expression: multiplies every element, in place,
// by the number of dimensions of the expression.
template <class T, std::enable_if_t<xt::is_xexpression<T>::value, bool> = true>
void times_dimension(T& t)
{
    using value_type = typename T::value_type;
    t *= (value_type)(t.dimension());
}

// Overload for std::vector: intentionally a no-op. It exists to show how
// SFINAE selects a different implementation for non-xtensor types.
template <class T, std::enable_if_t<is_std_vector<T>::value, bool> = true>
void times_dimension(T& t)
{
    // do nothing
}

} // namespace mymodule
================================================
FILE: docs/source/examples/sfinae/python.cpp
================================================
#include "mymodule.hpp"
#include <pybind11/pybind11.h>
// FORCE_IMPORT_ARRAY must be defined exactly once in the module, before
// including any xtensor-python header (even indirectly).
#define FORCE_IMPORT_ARRAY
#include <xtensor-python/pyarray.hpp>
// Bind the generic times_dimension() instantiated for xt::pyarray<double>,
// so NumPy arrays passed from Python are modified in place.
PYBIND11_MODULE(mymodule, m)
{
    xt::import_numpy(); // initialize the NumPy C API for this module
    m.doc() = "Test module for xtensor python bindings";
    m.def("times_dimension", &mymodule::times_dimension<xt::pyarray<double>>);
}
================================================
FILE: docs/source/examples.rst
================================================
****************
(CMake) Examples
****************
Basic example (from readme)
===========================
Consider the following C++ code:
:download:`main.cpp <examples/readme_example_1/main.cpp>`
.. literalinclude:: examples/readme_example_1/main.cpp
:language: cpp
There are several options to build the module,
whereby we will use *CMake* here with the following ``CMakeLists.txt``:
:download:`CMakeLists.txt <examples/readme_example_1/CMakeLists.txt>`
.. literalinclude:: examples/readme_example_1/CMakeLists.txt
:language: cmake
.. tip::
There is a potential pitfall here, centered around the fact that *CMake*
has a 'new' *FindPython* and a 'classic' *FindPythonLibs*.
We here use *FindPython* because of its ability to find the NumPy headers,
that we need for *xtensor-python*.
This has the consequence that when we want to force *CMake*
to use a specific *Python* executable, we have to use something like
.. code-block:: none
cmake -Bbuild -DPython_EXECUTABLE=`which python`
whereby it is crucial that one uses the correct case ``Python_EXECUTABLE``, as:
.. code-block:: none
Python_EXECUTABLE <-> FindPython
PYTHON_EXECUTABLE <-> FindPythonLibs
(remember that *CMake* is **case-sensitive**!).
Now, since we use *FindPython* because of *xtensor-python* we also want *pybind11*
to use *FindPython*
(and not the classic *FindPythonLibs*,
since we want to specify the *Python* executable only once).
To this end we have to make sure to do things in the correct order, which is
.. code-block:: cmake
find_package(Python REQUIRED COMPONENTS Interpreter Development NumPy)
find_package(pybind11 REQUIRED CONFIG)
(i.e. one finds *Python* **before** *pybind11*).
See the `pybind11 documentation <https://pybind11.readthedocs.io/en/latest/cmake/index.html#new-findpython-mode>`_.
In addition, be sure to use a quite recent *CMake* version,
by starting your ``CMakeLists.txt`` for example with
.. code-block:: cmake
cmake_minimum_required(VERSION 3.18..3.20)
Then we can test the module:
:download:`example.py <examples/readme_example_1/example.py>`
.. literalinclude:: examples/readme_example_1/example.py
   :language: python
.. note::
Since we did not install the module,
we should compile and run the example from the same folder.
To install, please consult
`this *pybind11* / *CMake* example <https://github.com/pybind/cmake_example>`_.
Type restriction with SFINAE
============================
.. seealso::
`Medium post by Johan Mabille <https://medium.com/@johan.mabille/designing-language-bindings-with-xtensor-f32aa0f20db>`__
This example covers "Option 4".
In this example we will design a module with a function that accepts an ``xt::xtensor`` as argument,
but in such a way that an ``xt::pytensor`` can be accepted in the Python module.
This is done by having a templated function
.. code-block:: cpp
template <class T>
void times_dimension(T& t);
As this might be a bit too permissive for your liking, we will show you how to limit the
scope to *xtensor* types, and allow other overloads using the principle of SFINAE
(Substitution Failure Is Not An Error).
In particular:
:download:`mymodule.hpp <examples/sfinae/mymodule.hpp>`
.. literalinclude:: examples/sfinae/mymodule.hpp
:language: cpp
Consequently from C++, the interaction with the module's function is trivial
:download:`main.cpp <examples/sfinae/main.cpp>`
.. literalinclude:: examples/sfinae/main.cpp
:language: cpp
For the Python module we just have to specify the template to be
``xt::pyarray`` or ``xt::pytensor``. E.g.
:download:`src/python.cpp <examples/sfinae/python.cpp>`
.. literalinclude:: examples/sfinae/python.cpp
:language: cpp
We will again use *CMake* to compile, with the following ``CMakeLists.txt``:
:download:`CMakeLists.txt <examples/sfinae/CMakeLists.txt>`
.. literalinclude:: examples/sfinae/CMakeLists.txt
:language: cmake
(see *CMake* tip above).
Then we can test the module:
:download:`example.py <examples/sfinae/example.py>`
.. literalinclude:: examples/sfinae/example.py
   :language: python
.. note::
Since we did not install the module,
we should compile and run the example from the same folder.
To install, please consult
`this pybind11 / CMake example <https://github.com/pybind/cmake_example>`_.
**Tip**: take care to modify that example with the correct *CMake* case ``Python_EXECUTABLE``.
Fall-back cast
==============
The previous example showed you how to design your module to be flexible in accepting data.
From C++ we used ``xt::xarray<double>``,
whereas for the Python API we used ``xt::pyarray<double>`` to operate directly on the memory
of a NumPy array from Python (without copying the data).
Sometimes, you might not have the flexibility to design your module's methods
with template parameters.
This might occur when you want to ``override`` functions
(though it is recommended to use CRTP to still use templates).
In this case we can still bind the module in Python using *xtensor-python*,
however, we have to copy the data from a (NumPy) array.
This means that although the following signatures are quite different when used from C++,
as follows:
1. *Constant reference*: read from the data, without copying it.
.. code-block:: cpp
void foo(const xt::xarray<double>& a);
2. *Reference*: read from and/or write to the data, without copying it.
.. code-block:: cpp
void foo(xt::xarray<double>& a);
3. *Copy*: copy the data.
.. code-block:: cpp
void foo(xt::xarray<double> a);
From Python, all cases will result in a copy to a temporary variable
(the last signature even leads to a second copy, from the temporary to ``a``).
On the one hand, this is more costly than when using ``xt::pyarray`` or ``xt::pytensor``;
on the other hand, it means that all changes you make to a reference are made to the temporary
copy, and are thus lost.
Still, it might be a convenient way to create Python bindings, using a minimal effort.
Consider this example:
:download:`main.cpp <examples/copy_cast/main.cpp>`
.. literalinclude:: examples/copy_cast/main.cpp
:language: cpp
================================================
FILE: docs/source/index.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
.. image:: xtensor-python.svg
Python bindings for the xtensor_ C++ multi-dimensional array library.
Introduction
------------
What are ``xtensor`` and ``xtensor-python``?
- ``xtensor`` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing.
- ``xtensor-python`` enables inplace use of numpy arrays with all the benefits from ``xtensor``
- C++ universal functions and broadcasting
- STL - compliant APIs.
The `numpy to xtensor cheat sheet`_ from the ``xtensor`` documentation shows how numpy APIs translate to C++ with ``xtensor``.
The Python bindings for ``xtensor`` are based on the pybind11_ C++ library, which enables seamless interoperability between C++ and Python.
Enabling numpy arrays in your C++ libraries
-------------------------------------------
Instead of exposing new types to python, ``xtensor-python`` enables the use of NumPy_ data structures from C++ using Python's `Buffer Protocol`_.
In addition to the basic accessors and iterators of ``xtensor`` containers, it also enables using numpy arrays with ``xtensor``'s expression system.
Besides ``xtensor-python`` provides an API to create *Universal functions* from simple scalar functions from your C++ code.
Finally, a cookiecutter template project is provided. It takes care of the initial work of generating a project skeleton for a C++ extension based on ``xtensor-python`` containing a few examples, unit tests and HTML documentation. Find out more about the xtensor-python-cookiecutter_.
``xtensor`` and ``xtensor-python`` require a modern C++ compiler supporting C++14. The following C++ compilers are supported:
- On Windows platforms, Visual C++ 2015 Update 2, or more recent
- On Unix platforms, gcc 4.9 or a recent version of Clang
Licensing
---------
We use a shared copyright model that enables all contributors to maintain the
copyright on their contributions.
This software is licensed under the BSD-3-Clause license. See the LICENSE file for details.
.. toctree::
:caption: INSTALLATION
:maxdepth: 2
installation
.. toctree::
:caption: USAGE
:maxdepth: 2
basic_usage
array_tensor
numpy_capi
examples
cookiecutter
.. toctree::
:caption: API REFERENCE
:maxdepth: 2
api_reference
.. toctree::
:caption: DEVELOPER ZONE
dev_build_options
compilers
releasing
.. _NumPy: http://www.numpy.org
.. _`Buffer Protocol`: https://docs.python.org/3/c-api/buffer.html
.. _`numpy to xtensor cheat sheet`: http://xtensor.readthedocs.io/en/latest/numpy.html
.. _xtensor: https://github.com/xtensor-stack/xtensor
.. _pybind11: https://github.com/pybind/pybind11
.. _xtensor-python-cookiecutter: https://github.com/xtensor-stack/xtensor-python-cookiecutter
================================================
FILE: docs/source/installation.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
.. raw:: html
<style>
.rst-content .section>img {
width: 30px;
margin-bottom: 0;
margin-top: 0;
margin-right: 15px;
margin-left: 15px;
float: left;
}
</style>
Installation
============
Although ``xtensor-python`` is a header-only library, we provide standardized means to install it, with package managers or with cmake.
Besides the xtensor-python headers, all these methods place the `cmake` project configuration file in the right location so that third-party projects can use cmake's find_package to locate xtensor-python headers.
.. image:: conda.svg
Using the conda-forge package
----------------------------
A package for xtensor-python is available on the mamba (or conda) package manager.
.. code::
mamba install -c conda-forge xtensor-python
.. image:: debian.svg
Using the Debian package
------------------------
A package for xtensor-python is available on Debian.
.. code::
sudo apt-get install xtensor-python-dev
.. image:: cmake.svg
From source with cmake
----------------------
You can also install ``xtensor-python`` from source with cmake. On Unix platforms, from the source directory:
.. code::
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/path/to/prefix ..
make install
On Windows platforms, from the source directory:
.. code::
mkdir build
cd build
cmake -G "NMake Makefiles" -DCMAKE_INSTALL_PREFIX=/path/to/prefix ..
nmake
nmake install
See the section of the documentation on :doc:`dev_build_options` for more details on the cmake options.
================================================
FILE: docs/source/numpy_capi.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Importing numpy C API
=====================
Importing the C API module of numpy requires more code than just including a header. ``xtensor-python`` greatly simplifies
this import, however some actions are still required in the user code.
Extension module with a single file
-----------------------------------
When writing an extension module that is self-contained in a single file, its author should pay attention to the following
points:
- ``FORCE_IMPORT_ARRAY`` must be defined before including any header of ``xtensor-python``.
- ``xt::import_numpy()`` must be called in the function initializing the module.
Thus the basic skeleton of the module looks like:
.. code::
#define FORCE_IMPORT_ARRAY
#include "xtensor-python/pyarray.hpp"
PYBIND11_MODULE(plugin_name, m)
{
xt::import_numpy();
//...
}
Extension module with multiple files
------------------------------------
If the extension module contains many source files that include ``xtensor-python`` header files, the previous points are still
required. However, the symbol ``FORCE_IMPORT_ARRAY`` must be defined only once. The simplest way is to define it in the file that
contains the initializing code of the module; you can then directly include ``xtensor-python`` headers in other files. Let's
illustrate this with an extension module containing the following files:
- ``main.cpp``: initializing code of the module
- ``image.hpp``: declaration of the ``image`` class embedding an ``xt::pyarray`` object
- ``image.cpp``: implementation of the ``image`` class
The basic skeleton of the module looks like:
.. code::
// image.hpp
// Do NOT define FORCE_IMPORT_ARRAY here
#include "xtensor-python/pyarray.hpp"
class image
{
// ....
private:
xt::pyarray<double> m_data;
};
// image.cpp
// Do NOT define FORCE_IMPORT_ARRAY here
#include "image.hpp"
// definition of the image class
// main.cpp
// FORCE_IMPORT_ARRAY must be define ONCE, BEFORE including
// any header from xtensor-python (even indirectly)
#define FORCE_IMPORT_ARRAY
#include "image.hpp"
PYBIND11_MODULE(plugin_name, m)
{
xt::import_numpy();
//...
}
Using other extension modules
-----------------------------
Including an header of ``xtensor-python`` actually defines ``PY_ARRAY_UNIQUE_SYMBOL`` to ``xtensor_python_ARRAY_API``. This might
be problematic if you import another library that defines its own ``PY_ARRAY_UNIQUE_SYMBOL``, or if you define yours. If so,
you can override the behavior of ``xtensor-python`` by explicitly defining ``PY_ARRAY_UNIQUE_SYMBOL`` prior to including any
``xtensor-python`` header:
.. code::
// in every source file
#define PY_ARRAY_UNIQUE_SYMBOL my_unique_array_api
#include "xtensor-python/pyarray.hpp"
================================================
FILE: docs/source/pyarray.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
pyarray
=======
.. doxygenclass:: xt::pyarray
:project: xtensor-python
:members:
================================================
FILE: docs/source/pytensor.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
pytensor
========
.. doxygenclass:: xt::pytensor
:project: xtensor-python
:members:
================================================
FILE: docs/source/pyvectorize.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
pyvectorize
===========
.. doxygenfunction:: xt::pyvectorize
:project: xtensor-python
================================================
FILE: docs/source/releasing.rst
================================================
.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Releasing xtensor-python
========================
Releasing a new version
-----------------------
From the master branch of xtensor-python
- Make sure that you are in sync with the master branch of the upstream remote.
- In file ``xtensor_python_config.hpp``, set the macros for ``XTENSOR_PYTHON_VERSION_MAJOR``, ``XTENSOR_PYTHON_VERSION_MINOR`` and ``XTENSOR_PYTHON_VERSION_PATCH`` to the desired values.
- Update the readme file w.r.t. dependencies on xtensor and pybind11.
- Stage the changes (``git add``), commit the changes (``git commit``) and add a tag of the form ``Major.minor.patch``. It is important to not add any other content to the tag name.
- Push the new commit and tag to the main repository. (``git push``, and ``git push --tags``)
Updating the conda-forge recipe
-------------------------------
xtensor-python has been packaged for the conda package manager. Once the new tag has been pushed on GitHub, edit the conda-forge recipe for xtensor-python in the following fashion:
- Update the version number to the new Major.minor.patch.
- Set the build number to 0.
- Update the hash of the source tarball.
- Check for the versions of the dependencies.
- Optionally, rerender the conda-forge feedstock.
================================================
FILE: environment-dev.yml
================================================
# Conda environment providing the build, host and test dependencies
# needed to develop and test xtensor-python.
name: xtensor-python
channels:
  - conda-forge
dependencies:
  # Build dependencies
  - cmake
  - ninja
  # Host dependencies
  - xtensor>=0.27,<0.28
  - numpy>=2.0
  - pybind11>=2.12.0,<4
  # Test dependencies
  - setuptools
  - pytest
================================================
FILE: include/xtensor-python/pyarray.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PY_ARRAY_HPP
#define PY_ARRAY_HPP
#include <algorithm>
#include <cstddef>
#include <vector>
#include "xtensor/containers/xbuffer_adaptor.hpp"
#include "xtensor/core/xiterator.hpp"
#include "xtensor/core/xsemantic.hpp"
#include "pyarray_backstrides.hpp"
#include "pycontainer.hpp"
#include "pystrides_adaptor.hpp"
#include "pynative_casters.hpp"
#include "xtensor_type_caster_base.hpp"
#include "xtensor_python_config.hpp"
namespace xt
{
    // Forward declaration; the layout defaults to dynamic so that arbitrary
    // NumPy memory layouts can be wrapped at runtime.
    template <class T, layout_type L = layout_type::dynamic>
    class pyarray;
}
namespace pybind11
{
    namespace detail
    {
#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3
        // Human-readable name for pyarray in generated signatures
        // (pre-2.3 pybind11 only, where PYBIND11_DESCR still exists).
        template <class T, xt::layout_type L>
        struct handle_type_name<xt::pyarray<T, L>>
        {
            static PYBIND11_DESCR name()
            {
                return _("numpy.ndarray[") + npy_format_descriptor<T>::name() + _("]");
            }
        };
#endif
        // Caster converting between a Python object (a NumPy ndarray) and
        // xt::pyarray without copying the underlying buffer.
        template <typename T, xt::layout_type L>
        struct pyobject_caster<xt::pyarray<T, L>>
        {
            using type = xt::pyarray<T, L>;
            // Load `src` into `value`. When implicit conversion is disabled,
            // the handle must already pass xt::detail::check_array<T>;
            // otherwise type::ensure() may coerce the object.
            bool load(handle src, bool convert)
            {
                if (!convert)
                {
                    if (!xt::detail::check_array<T>(src))
                    {
                        return false;
                    }
                }
                value = type::ensure(src);
                return static_cast<bool>(value);
            }
            // Casting back to Python simply returns the wrapped object with
            // an additional reference.
            static handle cast(const handle& src, return_value_policy, handle)
            {
                return src.inc_ref();
            }
#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3
            PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name());
#else
            PYBIND11_TYPE_CASTER(type, _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]"));
#endif
        };
        // Type caster for casting ndarray to xexpression<pyarray>
        template <typename T, xt::layout_type L>
        struct type_caster<xt::xexpression<xt::pyarray<T, L>>> : pyobject_caster<xt::pyarray<T, L>>
        {
            using Type = xt::xexpression<xt::pyarray<T, L>>;
            operator Type&()
            {
                return this->value;
            }
            operator const Type&()
            {
                return this->value;
            }
        };
    }
}
namespace xt
{
    // Iterable inner types: reuse the generic xcontainer iterator machinery.
    template <class T, layout_type L>
    struct xiterable_inner_types<pyarray<T, L>>
        : xcontainer_iterable_types<pyarray<T, L>>
    {
    };
    // Inner types of the pyarray container.
    template <class T, layout_type L>
    struct xcontainer_inner_types<pyarray<T, L>>
    {
        // Storage adapts the ndarray's data buffer in place (no copy).
        using storage_type = xbuffer_adaptor<T*>;
        using reference = typename storage_type::reference;
        using const_reference = typename storage_type::const_reference;
        using size_type = typename storage_type::size_type;
        using shape_type = std::vector<typename storage_type::size_type>;
        using strides_type = std::vector<typename storage_type::difference_type>;
        using backstrides_type = pyarray_backstrides<pyarray<T, L>>;
        // Inner shape/strides adapt the ndarray's own metadata buffers.
        // pystrides_adaptor is parameterized on sizeof(T), presumably to
        // express the ndarray's byte strides in element units — see
        // pystrides_adaptor.hpp.
        using inner_shape_type = xbuffer_adaptor<std::size_t*>;
        using inner_strides_type = pystrides_adaptor<sizeof(T)>;
        using inner_backstrides_type = backstrides_type;
        using temporary_type = pyarray<T, L>;
        static constexpr layout_type layout = L;
    };
/**
* @class pyarray
* @brief Multidimensional container providing the xtensor container semantics to a numpy array.
*
* pyarray is similar to the xarray container in that it has a dynamic dimensionality.
* Reshapes of a pyarray container are reflected in the underlying numpy array.
*
* @tparam T The type of the element stored in the pyarray.
* @tparam L Static layout of the pyarray
*
* @sa pytensor
*/
    template <class T, layout_type L>
    class pyarray : public pycontainer<pyarray<T, L>>,
                    public xcontainer_semantic<pyarray<T, L>>
    {
    public:
        // Type aliases forwarded from the pycontainer base.
        using self_type = pyarray<T, L>;
        using semantic_base = xcontainer_semantic<self_type>;
        using base_type = pycontainer<self_type>;
        using storage_type = typename base_type::storage_type;
        using value_type = typename base_type::value_type;
        using reference = typename base_type::reference;
        using const_reference = typename base_type::const_reference;
        using pointer = typename base_type::pointer;
        using size_type = typename base_type::size_type;
        using difference_type = typename base_type::difference_type;
        using shape_type = typename base_type::shape_type;
        using strides_type = typename base_type::strides_type;
        using backstrides_type = typename base_type::backstrides_type;
        using inner_shape_type = typename base_type::inner_shape_type;
        using inner_strides_type = typename base_type::inner_strides_type;
        using inner_backstrides_type = typename base_type::inner_backstrides_type;
        // SIZE_MAX marks the rank (number of dimensions) as dynamic.
        constexpr static std::size_t rank = SIZE_MAX;
        // Constructors allocating a new NumPy array.
        pyarray();
        pyarray(const value_type& t);
        pyarray(nested_initializer_list_t<T, 1> t);
        pyarray(nested_initializer_list_t<T, 2> t);
        pyarray(nested_initializer_list_t<T, 3> t);
        pyarray(nested_initializer_list_t<T, 4> t);
        pyarray(nested_initializer_list_t<T, 5> t);
        // Constructors wrapping an existing Python object
        // (borrowed_t / stolen_t are pybind11's ownership tags).
        pyarray(pybind11::handle h, pybind11::object::borrowed_t);
        pyarray(pybind11::handle h, pybind11::object::stolen_t);
        pyarray(const pybind11::object& o);
        explicit pyarray(const shape_type& shape, layout_type l = layout_type::row_major);
        explicit pyarray(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);
        explicit pyarray(const shape_type& shape, const strides_type& strides, const_reference value);
        explicit pyarray(const shape_type& shape, const strides_type& strides);
        template <class S = shape_type>
        static pyarray from_shape(S&& s);
        pyarray(const self_type& rhs);
        self_type& operator=(const self_type& rhs);
        pyarray(self_type&&) = default;
        self_type& operator=(self_type&& e) = default;
        // Extended copy semantics: construct/assign from any xexpression.
        template <class E>
        pyarray(const xexpression<E>& e);
        template <class E>
        self_type& operator=(const xexpression<E>& e);
        using base_type::begin;
        using base_type::end;
        // pybind11 conversion protocol (see the pyobject_caster above).
        static self_type ensure(pybind11::handle h);
        static bool check_(pybind11::handle h);
#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || PYBIND11_VERSION_MAJOR >= 3
        // Prevent ambiguous overload resolution for operators defined for
        // both xt::xcontainer_semantic and pybind11::object.
        using semantic_base::operator+=;
        using semantic_base::operator-=;
        using semantic_base::operator*=;
        using semantic_base::operator/=;
        using semantic_base::operator|=;
        using semantic_base::operator&=;
        using semantic_base::operator^=;
        // using semantic_base::operator<<=;
        // using semantic_base::operator>>=;
#endif
    private:
        // Adaptors over the ndarray's own shape/strides memory (no copy);
        // set up by init_from_python().
        inner_shape_type m_shape;
        inner_strides_type m_strides;
        // NOTE(review): mutable — presumably recomputed from const accessors;
        // confirm against backstrides_impl().
        mutable inner_backstrides_type m_backstrides;
        // View over the ndarray's data buffer.
        storage_type m_storage;
        void init_array(const shape_type& shape, const strides_type& strides);
        void init_from_python();
        const inner_shape_type& shape_impl() const noexcept;
        const inner_strides_type& strides_impl() const noexcept;
        const inner_backstrides_type& backstrides_impl() const noexcept;
        storage_type& storage_impl() noexcept;
        const storage_type& storage_impl() const noexcept;
        layout_type default_dynamic_layout();
        friend class xcontainer<pyarray<T, L>>;
        friend class pycontainer<pyarray<T, L>>;
    };
/**************************
* pyarray implementation *
**************************/
/**
* @name Constructors
*/
//@{
    /**
     * Default constructor: allocates a 0-dimensional array
     * (empty shape, empty strides).
     */
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray()
        : base_type()
    {
        // TODO: avoid allocation
        shape_type shape = xtl::make_sequence<shape_type>(0, size_type(1));
        strides_type strides = xtl::make_sequence<strides_type>(0, size_type(0));
        init_array(shape, strides);
        detail::default_initialize(m_storage);
    }
    /**
     * Allocates a pyarray from a scalar value, resulting in a 0-dimensional
     * array. The following overloads allocate a pyarray from nested
     * initializer lists of the corresponding depth.
     */
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const value_type& t)
        : base_type()
    {
        // Shape deduced from the value via xt::shape, then the value is
        // copied into the freshly allocated buffer.
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        nested_copy(m_storage.begin(), t);
    }
    // Constructors from nested initializer lists of depth 1 to 5. The shape
    // is deduced from the list. For a non-row-major static layout, values
    // are copied through a row-major iterator so the element order of the
    // initializer list is preserved.
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 1> t)
        : base_type()
    {
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 2> t)
        : base_type()
    {
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 3> t)
        : base_type()
    {
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 4> t)
        : base_type()
    {
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(nested_initializer_list_t<T, 5> t)
        : base_type()
    {
        base_type::resize(xt::shape<shape_type>(t), default_dynamic_layout());
        L == layout_type::row_major ? nested_copy(m_storage.begin(), t) : nested_copy(this->template begin<layout_type::row_major>(), t);
    }
    // Constructors wrapping an existing Python object. borrowed_t and
    // stolen_t mirror pybind11's reference-ownership tags; each overload
    // reads the wrapped ndarray's shape/strides via init_from_python().
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(pybind11::handle h, pybind11::object::borrowed_t b)
        : base_type(h, b)
    {
        init_from_python();
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(pybind11::handle h, pybind11::object::stolen_t s)
        : base_type(h, s)
    {
        init_from_python();
    }
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const pybind11::object& o)
        : base_type(o)
    {
        init_from_python();
    }
/**
* Allocates an uninitialized pyarray with the specified shape and
* layout.
* @param shape the shape of the pyarray
* @param l the layout of the pyarray
*/
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const shape_type& shape, layout_type l)
        : base_type()
    {
        strides_type strides(shape.size());
        compute_strides(shape, l, strides);
        init_array(shape, strides);
    }
    /**
     * Allocates a pyarray with the specified shape and layout. Elements
     * are initialized to the specified value.
     * @param shape the shape of the pyarray
     * @param value the value of the elements
     * @param l the layout of the pyarray
     */
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const shape_type& shape, const_reference value, layout_type l)
        : base_type()
    {
        strides_type strides(shape.size());
        compute_strides(shape, l, strides);
        init_array(shape, strides);
        std::fill(m_storage.begin(), m_storage.end(), value);
    }
    /**
     * Allocates a pyarray with the specified shape and strides.
     * Elements are initialized to the specified value.
     * @param shape the shape of the pyarray
     * @param strides the strides of the pyarray
     * @param value the value of the elements
     */
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const shape_type& shape, const strides_type& strides, const_reference value)
        : base_type()
    {
        init_array(shape, strides);
        std::fill(m_storage.begin(), m_storage.end(), value);
    }
    /**
     * Allocates an uninitialized pyarray with the specified shape and strides.
     * @param shape the shape of the pyarray
     * @param strides the strides of the pyarray
     */
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const shape_type& shape, const strides_type& strides)
        : base_type()
    {
        init_array(shape, strides);
    }
    /**
     * Allocates and returns a pyarray with the specified shape.
     * @param shape the shape of the pyarray
     */
    template <class T, layout_type L>
    template <class S>
    inline pyarray<T, L> pyarray<T, L>::from_shape(S&& shape)
    {
        auto shp = xtl::forward_sequence<shape_type, S>(shape);
        return self_type(shp);
    }
//@}
/**
* @name Copy semantic
*/
//@{
/**
* The copy constructor.
*/
    template <class T, layout_type L>
    inline pyarray<T, L>::pyarray(const self_type& rhs)
        : base_type(), semantic_base(rhs)
    {
        // Create a new ndarray with the same shape/dtype as rhs
        // (NPY_KEEPORDER), then deep-copy the elements into it.
        auto tmp = pybind11::reinterpret_steal<pybind11::object>(
            PyArray_NewLikeArray(rhs.python_array(), NPY_KEEPORDER, nullptr, 1));
        if (!tmp)
        {
            throw std::runtime_error("NumPy: unable to create ndarray");
        }
        // Take ownership of the new array and rebuild the shape/strides
        // adaptors from it.
        this->m_ptr = tmp.release().ptr();
        init_from_python();
        std::copy(rhs.storage().cbegin(), rhs.storage().cend(), this->storage().begin());
    }
/**
* The assignment operator.
*/
    template <class T, layout_type L>
    inline auto pyarray<T, L>::operator=(const self_type& rhs) -> self_type&
    {
        // Copy-and-move: build a copy first, then move it into *this.
        self_type tmp(rhs);
        *this = std::move(tmp);
        return *this;
    }
//@}
/**
* @name Extended copy semantic
*/
//@{
/**
* The extended copy constructor.
*/
    template <class T, layout_type L>
    template <class E>
    inline pyarray<T, L>::pyarray(const xexpression<E>& e)
        : base_type()
    {
        // TODO: prevent intermediary shape allocation
        shape_type shape = xtl::forward_sequence<shape_type, decltype(e.derived_cast().shape())>(e.derived_cast().shape());
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), size_type(0));
        layout_type layout = default_dynamic_layout();
        compute_strides(shape, layout, strides);
        init_array(shape, strides);
        // Evaluate the expression into the freshly allocated NumPy buffer.
        semantic_base::assign(e);
    }
/**
* The extended assignment operator.
*/
    template <class T, layout_type L>
    template <class E>
    inline auto pyarray<T, L>::operator=(const xexpression<E>& e) -> self_type&
    {
        // Delegate to the xcontainer_semantic assignment machinery.
        return semantic_base::operator=(e);
    }
//@}
// Thin forwarders to the pycontainer implementations; they exist so that
// pybind11's type-caster machinery finds ensure()/check_() on pyarray.
template <class T, layout_type L>
inline auto pyarray<T, L>::ensure(pybind11::handle h) -> self_type
{
    return base_type::ensure(h);
}
template <class T, layout_type L>
inline bool pyarray<T, L>::check_(pybind11::handle h)
{
    return base_type::check_(h);
}
template <class T, layout_type L>
inline void pyarray<T, L>::init_array(const shape_type& shape, const strides_type& strides)
{
    // NumPy expects strides in bytes; the xtensor-side strides are in
    // elements, so scale by sizeof(T).
    strides_type adapted_strides(strides);
    std::transform(strides.begin(), strides.end(), adapted_strides.begin(),
                   [](auto v) { return sizeof(T) * v; });

    // A const element type maps to a read-only ndarray.
    int flags = NPY_ARRAY_ALIGNED;
    if (!std::is_const<T>::value)
    {
        flags |= NPY_ARRAY_WRITEABLE;
    }
    auto dtype = pybind11::detail::npy_format_descriptor<T>::dtype();

    // PyArray_NewFromDescr copies the shape/strides buffers, so passing
    // pointers into local/argument containers is safe; the casts assume
    // size_type and difference_type have npy_intp's width.
    npy_intp* shape_data = reinterpret_cast<npy_intp*>(const_cast<size_type*>(shape.data()));
    npy_intp* strides_data = reinterpret_cast<npy_intp*>(adapted_strides.data());

    // dtype.release() hands the descriptor reference over to NumPy, which
    // steals it; reinterpret_steal owns the resulting ndarray reference.
    auto tmp = pybind11::reinterpret_steal<pybind11::object>(
        PyArray_NewFromDescr(&PyArray_Type, (PyArray_Descr*) dtype.release().ptr(), static_cast<int>(shape.size()), shape_data, strides_data,
                             nullptr, flags, nullptr));

    if (!tmp)
    {
        throw std::runtime_error("NumPy: unable to create ndarray");
    }

    // Adopt the new ndarray and rebuild the xtensor-side proxies.
    this->m_ptr = tmp.release().ptr();
    init_from_python();
}
template <class T, layout_type L>
inline void pyarray<T, L>::init_from_python()
{
    // Nothing to wire up when no Python object is held (null handle).
    if (!static_cast<bool>(*this))
    {
        return;
    }

    // The shape/strides proxies wrap NumPy's own buffers; they stay valid
    // for as long as this object holds the ndarray reference.
    m_shape = inner_shape_type(reinterpret_cast<size_type*>(PyArray_SHAPE(this->python_array())),
                               static_cast<size_type>(PyArray_NDIM(this->python_array())));
    m_strides = inner_strides_type(reinterpret_cast<difference_type*>(PyArray_STRIDES(this->python_array())),
                                   static_cast<size_type>(PyArray_NDIM(this->python_array())),
                                   reinterpret_cast<size_type*>(PyArray_SHAPE(this->python_array())));

    // A static layout template parameter must match the actual ndarray
    // strides; views with incompatible strides are rejected here.
    if (L != layout_type::dynamic && !do_strides_match(m_shape, m_strides, L, 1))
    {
        throw std::runtime_error("NumPy: passing container with bad strides for layout (is it a view?).");
    }

    m_backstrides = backstrides_type(*this);
    m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),
                             this->get_buffer_size());
}
// Accessors required by the xcontainer CRTP interface; they expose the
// proxies kept in sync with the underlying ndarray by init_from_python().
template <class T, layout_type L>
inline auto pyarray<T, L>::shape_impl() const noexcept -> const inner_shape_type&
{
    return m_shape;
}
template <class T, layout_type L>
inline auto pyarray<T, L>::strides_impl() const noexcept -> const inner_strides_type&
{
    return m_strides;
}
template <class T, layout_type L>
inline auto pyarray<T, L>::backstrides_impl() const noexcept -> const inner_backstrides_type&
{
    // m_backstrides wraps the numpy array backstrides, which is a raw pointer.
    // The address of the raw pointer stored in the wrapper would be invalidated when the pyarray is copied.
    // Hence, we build a new backstrides object (cheap wrapper around the underlying pointer) upon access.
    m_backstrides = backstrides_type(*this);
    return m_backstrides;
}
template <class T, layout_type L>
inline auto pyarray<T, L>::storage_impl() noexcept -> storage_type&
{
    return m_storage;
}
template <class T, layout_type L>
inline auto pyarray<T, L>::storage_impl() const noexcept -> const storage_type&
{
    return m_storage;
}
template <class T, layout_type L>
layout_type pyarray<T, L>::default_dynamic_layout()
{
    // A dynamic compile-time layout has no concrete stride scheme, so
    // allocation falls back to row-major; otherwise use L itself.
    if (L == layout_type::dynamic)
    {
        return layout_type::row_major;
    }
    return L;
}
}
#endif
================================================
FILE: include/xtensor-python/pyarray_backstrides.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PY_ARRAY_BACKSTRIDES_HPP
#define PY_ARRAY_BACKSTRIDES_HPP
#include <cstddef>
#include <iterator>
namespace xt
{
/**************************
* pybackstrides_iterator *
**************************/
// Random-access iterator over a pyarray_backstrides wrapper B.
// Backstride values are computed on demand by B::operator[], so the
// iterator stores only the wrapper pointer and a current offset.
template <class B>
class pybackstrides_iterator
{
public:
    using self_type = pybackstrides_iterator<B>;
    using value_type = typename B::value_type;
    using pointer = const value_type*;
    // reference is a value: elements are computed, not stored.
    using reference = value_type;
    using difference_type = std::ptrdiff_t;
    using iterator_category = std::random_access_iterator_tag;
    pybackstrides_iterator(const B* b, std::size_t offset);
    reference operator*() const;
    pointer operator->() const;
    reference operator[](difference_type n) const;
    self_type& operator++();
    self_type& operator--();
    self_type operator++(int);
    self_type operator--(int);
    self_type& operator+=(difference_type n);
    self_type& operator-=(difference_type n);
    self_type operator+(difference_type n) const;
    self_type operator-(difference_type n) const;
    self_type operator-(const self_type& rhs) const;
    // Current position; used by the non-member comparison operators.
    std::size_t offset() const;
private:
    const B* p_b;
    std::size_t m_offset;
};
// Comparison operators; all defined in terms of the iterators' offsets.
template <class B>
inline bool operator==(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs);
template <class B>
inline bool operator!=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs);
template <class B>
inline bool operator<(const pybackstrides_iterator<B>& lhs,
                      const pybackstrides_iterator<B>& rhs);
template <class B>
inline bool operator<=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs);
template <class B>
inline bool operator>(const pybackstrides_iterator<B>& lhs,
                      const pybackstrides_iterator<B>& rhs);
template <class B>
inline bool operator>=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs);
/***********************
* pyarray_backstrides *
***********************/
// Lightweight, read-only view of the backstrides of a pyarray A.
// It stores only a pointer to the array; each backstride is computed
// on demand from the array's shape and strides.
template <class A>
class pyarray_backstrides
{
public:
    using self_type = pyarray_backstrides<A>;
    using array_type = A;
    using value_type = typename array_type::size_type;
    // References are values: elements are computed, not stored.
    using const_reference = value_type;
    using reference = const_reference;
    using const_pointer = const value_type*;
    using pointer = const_pointer;
    using size_type = typename array_type::size_type;
    using difference_type = typename array_type::difference_type;
    using const_iterator = pybackstrides_iterator<self_type>;
    using iterator = const_iterator;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;
    pyarray_backstrides() = default;
    pyarray_backstrides(const array_type& a);
    bool empty() const;
    size_type size() const;
    value_type operator[](size_type i) const;
    const_reference front() const;
    const_reference back() const;
    const_iterator begin() const;
    const_iterator end() const;
    const_iterator cbegin() const;
    const_iterator cend() const;
    const_reverse_iterator rbegin() const;
    const_reverse_iterator rend() const;
    const_reverse_iterator crbegin() const;
    const_reverse_iterator crend() const;
private:
    // Non-owning; the wrapped array must outlive this view.
    const array_type* p_a;
};
/*****************************************
* pybackstrides_iterator implementation *
*****************************************/
template <class B>
inline pybackstrides_iterator<B>::pybackstrides_iterator(const B* b, std::size_t offset)
    : p_b(b), m_offset(offset)
{
}
template <class B>
inline auto pybackstrides_iterator<B>::operator*() const -> reference
{
    // reference is value_type: the backstride is computed by the wrapper.
    return p_b->operator[](m_offset);
}
template <class B>
inline auto pybackstrides_iterator<B>::operator->() const -> pointer
{
    // WARNING: returns the address of a local temporary, which dangles as
    // soon as this call returns (undefined behavior if dereferenced).
    // A correct fix requires caching the value in the iterator itself.
    value_type res = p_b->operator[](m_offset);
    return &res;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator[](difference_type n) const -> reference
{
    return p_b->operator[](m_offset + n);
}
// Navigation: all operations only move the stored offset; the wrapper
// pointer is shared between copies.
template <class B>
inline auto pybackstrides_iterator<B>::operator++() -> self_type&
{
    ++m_offset;
    return *this;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator--() -> self_type&
{
    --m_offset;
    return *this;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator++(int )-> self_type
{
    self_type tmp(*this);
    ++m_offset;
    return tmp;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator--(int) -> self_type
{
    self_type tmp(*this);
    --m_offset;
    return tmp;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator+=(difference_type n) -> self_type&
{
    m_offset += n;
    return *this;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator-=(difference_type n) -> self_type&
{
    m_offset -= n;
    return *this;
}
template <class B>
inline auto pybackstrides_iterator<B>::operator+(difference_type n) const -> self_type
{
    return self_type(p_b, m_offset + n);
}
template <class B>
inline auto pybackstrides_iterator<B>::operator-(difference_type n) const -> self_type
{
    return self_type(p_b, m_offset - n);
}
template <class B>
inline auto pybackstrides_iterator<B>::operator-(const self_type& rhs) const -> self_type
{
    // NOTE(review): contrary to the usual random-access iterator contract,
    // this overload returns an iterator positioned at rhs's offset rather
    // than a difference_type distance (the declaration fixes the return
    // type to self_type). Confirm the intent before relying on it.
    self_type tmp(*this);
    tmp -= (m_offset - rhs.m_offset);
    return tmp;
}
template <class B>
inline std::size_t pybackstrides_iterator<B>::offset() const
{
    // Current position; used by the non-member comparison operators.
    return m_offset;
}
// Comparison operators, all expressed through the iterators' offsets.
// The derived operators (<=, >, >=) are written in terms of operator<,
// which yields the same truth table as the spelled-out forms.
template <class B>
inline bool operator==(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs)
{
    return lhs.offset() == rhs.offset();
}
template <class B>
inline bool operator!=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs)
{
    return lhs.offset() != rhs.offset();
}
template <class B>
inline bool operator<(const pybackstrides_iterator<B>& lhs,
                      const pybackstrides_iterator<B>& rhs)
{
    return lhs.offset() < rhs.offset();
}
template <class B>
inline bool operator<=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs)
{
    return !(rhs < lhs);
}
template <class B>
inline bool operator>(const pybackstrides_iterator<B>& lhs,
                      const pybackstrides_iterator<B>& rhs)
{
    return rhs < lhs;
}
template <class B>
inline bool operator>=(const pybackstrides_iterator<B>& lhs,
                       const pybackstrides_iterator<B>& rhs)
{
    return rhs <= lhs;
}
/**************************************
* pyarray_backstrides implementation *
**************************************/
template <class A>
inline pyarray_backstrides<A>::pyarray_backstrides(const array_type& a)
    : p_a(&a)
{
}
// The backstrides container has one entry per dimension of the array.
template <class A>
inline bool pyarray_backstrides<A>::empty() const
{
    return p_a->dimension() == 0;
}
template <class A>
inline auto pyarray_backstrides<A>::size() const -> size_type
{
    return p_a->dimension();
}
template <class A>
inline auto pyarray_backstrides<A>::operator[](size_type i) const -> value_type
{
    // Backstride i is (shape[i] - 1) * strides[i]; a dimension of extent 1
    // contributes no displacement, so its backstride is defined as 0.
    const value_type extent = p_a->shape()[i];
    if (extent == 1)
    {
        return 0;
    }
    return (extent - 1) * p_a->strides()[i];
}
template <class A>
inline auto pyarray_backstrides<A>::front() const -> const_reference
{
    // Backstride of the first dimension, computed on the fly.
    return (*this)[0];
}
template <class A>
inline auto pyarray_backstrides<A>::back() const -> const_reference
{
    // Backstride of the last dimension. This container has dimension()
    // entries, so the last valid index is dimension() - 1. The previous
    // code indexed with p_a->size() - 1 (the number of *elements* in the
    // array), which reads shape()/strides() out of bounds whenever the
    // element count differs from the rank.
    auto index = p_a->dimension() - 1;
    value_type sh = p_a->shape()[index];
    value_type res = sh == 1 ? 0 : (sh - 1) * p_a->strides()[index];
    return res;
}
// Iterator access: const_iterator computes each backstride on demand;
// reverse iterators are the standard adaptors over the forward ones.
template <class A>
inline auto pyarray_backstrides<A>::begin() const -> const_iterator
{
    return cbegin();
}
template <class A>
inline auto pyarray_backstrides<A>::end() const -> const_iterator
{
    return cend();
}
template <class A>
inline auto pyarray_backstrides<A>::cbegin() const -> const_iterator
{
    return const_iterator(this, 0);
}
template <class A>
inline auto pyarray_backstrides<A>::cend() const -> const_iterator
{
    return const_iterator(this, size());
}
template <class A>
inline auto pyarray_backstrides<A>::rbegin() const -> const_reverse_iterator
{
    return crbegin();
}
template <class A>
inline auto pyarray_backstrides<A>::rend() const -> const_reverse_iterator
{
    return crend();
}
template <class A>
inline auto pyarray_backstrides<A>::crbegin() const -> const_reverse_iterator
{
    return const_reverse_iterator(end());
}
template <class A>
inline auto pyarray_backstrides<A>::crend() const -> const_reverse_iterator
{
    return const_reverse_iterator(begin());
}
}
#endif
================================================
FILE: include/xtensor-python/pycontainer.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PY_CONTAINER_HPP
#define PY_CONTAINER_HPP
#include <cmath>
#include <functional>
#include <numeric>
#include <sstream>
#include "pybind11/complex.h"
#include "pybind11/pybind11.h"
#include "pybind11/numpy.h"
#ifndef FORCE_IMPORT_ARRAY
#define NO_IMPORT_ARRAY
#endif
#ifndef PY_ARRAY_UNIQUE_SYMBOL
#define PY_ARRAY_UNIQUE_SYMBOL xtensor_python_ARRAY_API
#endif
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/arrayobject.h"
// Required because pyconfig.hpp defines copysign to _copysign
#undef copysign
#include <cmath>
#include "xtensor/containers/xcontainer.hpp"
#include "xtl/xsequence.hpp"
namespace xt
{
inline void import_numpy();
/**
* @class pycontainer
 * @brief Base class for xtensor containers wrapping numpy arrays.
*
 * The pycontainer class should not be instantiated directly. Instead, users should
 * use pytensor and pyarray instances.
*
* @tparam D The derived type, i.e. the inheriting class for which pycontainer
* provides the interface.
*/
template <class D>
class pycontainer : public pybind11::object,
                    public xcontainer<D>
{
public:
    using derived_type = D;
    using base_type = xcontainer<D>;
    using inner_types = xcontainer_inner_types<D>;
    using storage_type = typename inner_types::storage_type;
    using value_type = typename storage_type::value_type;
    using reference = typename storage_type::reference;
    using const_reference = typename storage_type::const_reference;
    using pointer = typename storage_type::pointer;
    using const_pointer = typename storage_type::const_pointer;
    using size_type = typename storage_type::size_type;
    using difference_type = typename storage_type::difference_type;
    using shape_type = typename inner_types::shape_type;
    using strides_type = typename inner_types::strides_type;
    using backstrides_type = typename inner_types::backstrides_type;
    using inner_shape_type = typename inner_types::inner_shape_type;
    using inner_strides_type = typename inner_types::inner_strides_type;
    using iterable_base = xcontainer<D>;
    using iterator = typename iterable_base::iterator;
    using const_iterator = typename iterable_base::const_iterator;
    using stepper = typename iterable_base::stepper;
    using const_stepper = typename iterable_base::const_stepper;

    // Resizing replaces the underlying ndarray; reshape keeps the buffer
    // and only changes its dimensions (element count must be preserved).
    template <class S = shape_type>
    void resize(const S& shape);
    template <class S = shape_type>
    void resize(const S& shape, layout_type l);
    template <class S = shape_type>
    void resize(const S& shape, const strides_type& strides);
    template <class S = shape_type>
    auto& reshape(S&& shape, layout_type layout = base_type::static_layout) &;

    layout_type layout() const;
    bool is_contiguous() const noexcept;

    using base_type::operator();
    using base_type::operator[];
    using base_type::begin;
    using base_type::end;

protected:
    // Construction/copy is restricted to derived classes (pyarray,
    // pytensor); pycontainer itself is never instantiated directly.
    pycontainer();
    ~pycontainer() = default;
    pycontainer(pybind11::handle h, borrowed_t);
    pycontainer(pybind11::handle h, stolen_t);
    pycontainer(const pybind11::object& o);
    pycontainer(const pycontainer&) = default;
    pycontainer& operator=(const pycontainer&) = default;
    pycontainer(pycontainer&&) = default;
    pycontainer& operator=(pycontainer&&) = default;

    static derived_type ensure(pybind11::handle h);
    static bool check_(pybind11::handle h);
    static PyObject* raw_array_t(PyObject* ptr);

    derived_type& derived_cast();
    const derived_type& derived_cast() const;

    PyArrayObject* python_array() const;
    size_type get_buffer_size() const;

private:
#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || PYBIND11_VERSION_MAJOR >= 3
    // Prevent ambiguous overload resolution for operators defined for
    // both xt::xcontainer and pybind11::object.
    using pybind11::object::operator~;
    using pybind11::object::operator+;
    using pybind11::object::operator-;
    using pybind11::object::operator*;
    using pybind11::object::operator/;
    using pybind11::object::operator|;
    using pybind11::object::operator&;
    using pybind11::object::operator^;
    using pybind11::object::operator<<;
    using pybind11::object::operator>>;
#endif
};
namespace detail
{
    // Maps a C++ scalar type T to its NumPy type number (NPY_*).
    template <class T, class E = void>
    struct numpy_traits;
    template <class T>
    struct numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value>>
    {
    private:
        // On Windows 64 bits, NPY_INT != NPY_INT32 and NPY_UINT != NPY_UINT32
        // We use the NPY_INT32 and NPY_UINT32 which are consistent with the values
        // of NPY_LONG and NPY_ULONG
        // On Linux x64, NPY_INT64 != NPY_LONGLONG and NPY_UINT64 != NPY_ULONGLONG,
        // we use the values of NPY_INT64 and NPY_UINT64 which are consistent with the
        // values of NPY_LONG and NPY_ULONG.
        constexpr static const int value_list[15] = {
            NPY_BOOL,
            NPY_BYTE, NPY_UBYTE, NPY_SHORT, NPY_USHORT,
            NPY_INT32, NPY_UINT32, NPY_INT64, NPY_UINT64,
            NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
            NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE};
    public:
        using value_type = std::remove_const_t<T>;
        // pybind11's is_fmt_numeric index enumerates the same types in the
        // same order as value_list above.
        static constexpr int type_num = value_list[pybind11::detail::is_fmt_numeric<value_type>::index];
    };
    // On Linux x64, NPY_INT64 != NPY_LONGLONG and NPY_UINT64 != NPY_ULONGLONG
    // NPY_LONGLONG and NPY_ULONGLONG must be adjusted so the right type is
    // selected
    template <bool>
    struct numpy_enum_adjuster
    {
        static inline int pyarray_type(PyArrayObject* obj)
        {
            return PyArray_TYPE(obj);
        }
    };
    template <>
    struct numpy_enum_adjuster<true>
    {
        static inline int pyarray_type(PyArrayObject* obj)
        {
            int res = PyArray_TYPE(obj);
            // NPY_LONGLONG - 2 == NPY_LONG, matching the traits above.
            if(res == NPY_LONGLONG || res == NPY_ULONGLONG)
            {
                res -= 2;
            }
            return res;
        }
    };
    // Returns the (platform-adjusted) NumPy type number of obj.
    inline int pyarray_type(PyArrayObject* obj)
    {
        return numpy_enum_adjuster<NPY_LONGLONG != NPY_INT64>::pyarray_type(obj);
    }
    // Touch the first element of freshly allocated storage so that
    // non-trivial value types are in a defined state; no-op otherwise.
    template <class T>
    void default_initialize_impl(T& /*storage*/, std::false_type)
    {
    }
    template <class T>
    void default_initialize_impl(T& storage, std::true_type)
    {
        using value_type = typename T::value_type;
        storage[0] = value_type{};
    }
    template <class T>
    void default_initialize(T& storage)
    {
        using value_type = typename T::value_type;
        default_initialize_impl(storage, std::is_copy_assignable<value_type>());
    }
    // Arithmetic/complex types compare NumPy type numbers directly;
    // other types fall back to descriptor equivalence.
    template <class T>
    bool check_array_type(const pybind11::handle& src, std::true_type)
    {
        int type_num = xt::detail::numpy_traits<T>::type_num;
        return xt::detail::pyarray_type(reinterpret_cast<PyArrayObject*>(src.ptr())) == type_num;
    }
    template <class T>
    bool check_array_type(const pybind11::handle& src, std::false_type)
    {
        return PyArray_EquivTypes((PyArray_Descr*) pybind11::detail::array_proxy(src.ptr())->descr,
                                  (PyArray_Descr*) pybind11::dtype::of<T>().ptr());
    }
    // True when src is an ndarray whose dtype matches T.
    template <class T>
    bool check_array(const pybind11::handle& src)
    {
        using is_arithmetic_type = std::integral_constant<bool, bool(pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value)>;
        return PyArray_Check(src.ptr()) && check_array_type<T>(src, is_arithmetic_type{});
    }
}
/******************************
* pycontainer implementation *
******************************/
// Constructors: the handle-based overloads mirror pybind11's object
// ownership conventions (borrowed vs stolen reference).
template <class D>
inline pycontainer<D>::pycontainer()
    : pybind11::object()
{
}
template <class D>
inline pycontainer<D>::pycontainer(pybind11::handle h, borrowed_t b)
    : pybind11::object(h, b)
{
}
template <class D>
inline pycontainer<D>::pycontainer(pybind11::handle h, stolen_t s)
    : pybind11::object(h, s)
{
}
template <class D>
inline pycontainer<D>::pycontainer(const pybind11::object& o)
    : pybind11::object(raw_array_t(o.ptr()), pybind11::object::stolen_t{})
{
    // raw_array_t returns a new reference or nullptr with a Python error
    // set; propagate the latter as a pybind11 exception.
    if (!this->m_ptr)
    {
        throw pybind11::error_already_set();
    }
}
// ensure converts h to the derived array type, returning a null handle
// (and clearing the Python error) on failure, per pybind11 conventions.
template <class D>
inline auto pycontainer<D>::ensure(pybind11::handle h) -> derived_type
{
    auto result = pybind11::reinterpret_steal<derived_type>(raw_array_t(h.ptr()));
    if (result.ptr() == nullptr)
    {
        PyErr_Clear();
    }
    return result;
}
template <class D>
inline bool pycontainer<D>::check_(pybind11::handle h)
{
    return detail::check_array<typename D::value_type>(h);
}
template <class D>
inline PyObject* pycontainer<D>::raw_array_t(PyObject* ptr)
{
    if (ptr == nullptr)
    {
        return nullptr;
    }
    // PyArray_FromAny steals the descriptor reference (hence release()).
    // FORCECAST converts the input to value_type's dtype if needed;
    // ENSUREARRAY guarantees a base-class ndarray is returned.
    auto dtype = pybind11::detail::npy_format_descriptor<value_type>::dtype();
    auto res = PyArray_FromAny(ptr, (PyArray_Descr *) dtype.release().ptr(), 0, 0,
                               NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST, nullptr);
    return res;
}
template <class D>
inline PyArrayObject* pycontainer<D>::python_array() const
{
    // The held object is always an ndarray, so the cast is safe.
    return reinterpret_cast<PyArrayObject*>(this->m_ptr);
}
template <class D>
inline auto pycontainer<D>::get_buffer_size() const -> size_type
{
    // Estimate the usable length (in elements) of the ndarray's buffer as
    // (smallest stride, clamped to >= 1) * number of elements.
    // NOTE(review): for a contiguous array the minimum stride is 1 and
    // this is exactly the element count; for strided views it appears to
    // over-approximate -- confirm against the storage adaptor's use.
    const size_type& (*min)(const size_type&, const size_type&) = std::min<size_type>;
    size_type min_stride = this->strides().empty() ? size_type(1) :
        std::max(size_type(1), std::accumulate(this->strides().cbegin(),
                                               this->strides().cend(),
                                               std::numeric_limits<size_type>::max(),
                                               min));
    return min_stride * static_cast<size_type>(PyArray_SIZE(this->python_array()));
}
// CRTP downcasts to the derived container type.
template <class D>
inline auto pycontainer<D>::derived_cast() -> derived_type&
{
    return *static_cast<derived_type*>(this);
}
template <class D>
inline auto pycontainer<D>::derived_cast() const -> const derived_type&
{
    return *static_cast<const derived_type*>(this);
}
namespace detail
{
    // Validates a requested dimension count against the container's
    // shape type: dynamic shapes accept anything; fixed-size shapes
    // (std::array) must match N exactly.
    template <class S>
    struct check_dims
    {
        static bool run(std::size_t)
        {
            return true;
        }
    };
    template <class T, std::size_t N>
    struct check_dims<std::array<T, N>>
    {
        static bool run(std::size_t new_dim)
        {
            if(new_dim != N)
            {
                std::ostringstream err_msg;
                err_msg << "Invalid conversion to pycontainer, expecting a container of dimension "
                        << N << ", got a container of dimension " << new_dim << ".";
                throw std::runtime_error(err_msg.str());
            }
            return new_dim == N;
        }
    };
}
/**
* resizes the container.
* @param shape the new shape
*/
template <class D>
template <class S>
inline void pycontainer<D>::resize(const S& shape)
{
    // No-op when the requested shape equals the current one; otherwise
    // reallocate with row-major layout.
    if (shape.size() != this->dimension() || !std::equal(std::begin(shape), std::end(shape), std::begin(this->shape())))
    {
        resize(shape, layout_type::row_major);
    }
}
/**
 * resizes the container.
 * @param shape the new shape
 * @param l the new layout
 */
template <class D>
template <class S>
inline void pycontainer<D>::resize(const S& shape, layout_type l)
{
    // Derive strides from the requested layout, then delegate.
    strides_type strides = xtl::make_sequence<strides_type>(shape.size(), size_type(1));
    compute_strides(shape, l, strides);
    resize(shape, strides);
}
/**
 * resizes the container.
 * @param shape the new shape
 * @param strides the new strides
 */
template <class D>
template <class S>
inline void pycontainer<D>::resize(const S& shape, const strides_type& strides)
{
    // Allocate a fresh array with the requested geometry and move-assign
    // it into *this; the old ndarray is released by the move assignment.
    detail::check_dims<shape_type>::run(shape.size());
    derived_type tmp(xtl::forward_sequence<shape_type, decltype(shape)>(shape), strides);
    *static_cast<derived_type*>(this) = std::move(tmp);
}
/**
 * Reshapes the container in place, keeping the element buffer.
 * @param shape the new shape (element count must match the current size)
 * @param layout the layout used to reinterpret the buffer
 * @throws std::runtime_error on size mismatch or unknown layout
 * @throws pybind11::error_already_set if NumPy fails to reshape
 */
template <class D>
template <class S>
inline auto& pycontainer<D>::reshape(S&& shape, layout_type layout) &
{
    if (compute_size(shape) != this->size())
    {
        throw std::runtime_error("Cannot reshape with incorrect number of elements (" + std::to_string(this->size()) + " vs " + std::to_string(compute_size(shape)) + ")");
    }
    detail::check_dims<shape_type>::run(shape.size());
    layout = default_assignable_layout(layout);
    // Translate the xtensor layout to NumPy's ordering enum.
    NPY_ORDER npy_layout;
    if (layout == layout_type::row_major)
    {
        npy_layout = NPY_CORDER;
    }
    else if (layout == layout_type::column_major)
    {
        npy_layout = NPY_FORTRANORDER;
    }
    else
    {
        throw std::runtime_error("Cannot reshape with unknown layout_type.");
    }
    using shape_ptr = typename std::decay_t<S>::pointer;
    PyArray_Dims dims = {reinterpret_cast<npy_intp*>(const_cast<shape_ptr>(shape.data())), static_cast<int>(shape.size())};
    auto new_ptr = PyArray_Newshape((PyArrayObject*) this->ptr(), &dims, npy_layout);
    if (new_ptr == nullptr)
    {
        // PyArray_Newshape failed and set a Python error. The previous
        // code installed the null pointer, decref'd the old array and
        // silently left the container invalid; propagate the error
        // instead and leave *this untouched.
        throw pybind11::error_already_set();
    }
    // Swap in the reshaped ndarray and release the old reference.
    auto old_ptr = this->ptr();
    this->ptr() = new_ptr;
    Py_XDECREF(old_ptr);
    this->derived_cast().init_from_python();
    return *this;
}
/**
* Return the layout_type of the container
* @return layout_type of the container
*/
template <class D>
inline layout_type pycontainer<D>::layout() const
{
    // Derive the layout from NumPy's contiguity flags; arrays that are
    // neither C- nor F-contiguous (e.g. strided views) report dynamic.
    if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS))
    {
        return layout_type::row_major;
    }
    else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS))
    {
        return layout_type::column_major;
    }
    else
    {
        return layout_type::dynamic;
    }
}
/**
* Return whether or not the container uses contiguous buffer
* @return Boolean for contiguous buffer
*/
template <class D>
inline bool pycontainer<D>::is_contiguous() const noexcept
{
    // 0-d arrays are trivially contiguous. Otherwise require both the
    // NumPy contiguity flag and a unit innermost element stride (the
    // flag alone is not sufficient, e.g. for extent-1 dimensions).
    if (this->strides().size() == 0)
    {
        return true;
    }
    else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS))
    {
        return 1 == this->strides().back();
    }
    else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS))
    {
        return 1 == this->strides().front();
    }
    else
    {
        return false;
    }
}
/**
* Import the numpy Python module.
*/
inline void import_numpy()
{
#ifdef FORCE_IMPORT_ARRAY
    // Only the translation unit defining FORCE_IMPORT_ARRAY initializes
    // the NumPy C API (see the PY_ARRAY_UNIQUE_SYMBOL machinery above).
    if (_import_array() < 0)
    {
        // NOTE(review): the error is reported/set but execution continues;
        // callers see failure only via the pending Python exception.
        PyErr_Print();
        PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
    }
#endif
}
#if defined(__GNUC__) && !defined(__clang__)
namespace workaround
{
    // Fixes "undefined symbol" issues on GCC by forcing the
    // instantiation of these allocator specializations.
    inline void long_long_allocator()
    {
        std::allocator<long long> a;
        std::allocator<unsigned long long> b;
        std::allocator<double> c;
        std::allocator<std::complex<double>> d;
    }
}
#endif
}
#endif
================================================
FILE: include/xtensor-python/pynative_casters.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PYNATIVE_CASTERS_HPP
#define PYNATIVE_CASTERS_HPP
#include "xtensor_type_caster_base.hpp"
namespace pybind11
{
    namespace detail
    {
        // These specializations make pybind11 convert xtensor expression
        // types to/from NumPy ndarrays using xtensor_type_caster_base.
        // Type caster for casting xarray to ndarray
        template <class T, xt::layout_type L>
        struct type_caster<xt::xarray<T, L>> : xtensor_type_caster_base<xt::xarray<T, L>>
        {
        };
        // Type caster for casting xt::xtensor to ndarray
        template <class T, std::size_t N, xt::layout_type L>
        struct type_caster<xt::xtensor<T, N, L>> : xtensor_type_caster_base<xt::xtensor<T, N, L>>
        {
        };
        // Type caster for casting xt::xtensor_fixed to ndarray
        template <class T, class FSH, xt::layout_type L>
        struct type_caster<xt::xtensor_fixed<T, FSH, L>> : xtensor_type_caster_base<xt::xtensor_fixed<T, FSH, L>>
        {
        };
        // Type caster for casting xt::xstrided_view to ndarray
        template <class CT, class S, xt::layout_type L, class FST>
        struct type_caster<xt::xstrided_view<CT, S, L, FST>> : xtensor_type_caster_base<xt::xstrided_view<CT, S, L, FST>>
        {
        };
        // Type caster for casting xt::xarray_adaptor to ndarray
        template <class EC, xt::layout_type L, class SC, class Tag>
        struct type_caster<xt::xarray_adaptor<EC, L, SC, Tag>> : xtensor_type_caster_base<xt::xarray_adaptor<EC, L, SC, Tag>>
        {
        };
        // Type caster for casting xt::xtensor_adaptor to ndarray
        template <class EC, std::size_t N, xt::layout_type L, class Tag>
        struct type_caster<xt::xtensor_adaptor<EC, N, L, Tag>> : xtensor_type_caster_base<xt::xtensor_adaptor<EC, N, L, Tag>>
        {
        };
    }
}
#endif
================================================
FILE: include/xtensor-python/pystrides_adaptor.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PYSTRIDES_ADAPTOR_HPP
#define PYSTRIDES_ADAPTOR_HPP
#include <cstddef>
#include <iterator>
namespace xt
{
template <std::size_t N>
class pystrides_iterator;
/*********************************
* pystrides_adaptor declaration *
*********************************/
// Read-only view over a NumPy byte-strides buffer that exposes element
// strides (byte stride divided by the item size N). The shape pointer is
// needed because extent-1 dimensions report a stride of 0.
template <std::size_t N>
class pystrides_adaptor
{
public:
    using value_type = std::ptrdiff_t;
    // References are values: elements are computed, not stored.
    using const_reference = value_type;
    using reference = const_reference;
    using const_pointer = const value_type*;
    using pointer = const_pointer;
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using const_iterator = pystrides_iterator<N>;
    using iterator = const_iterator;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;
    using reverse_iterator = const_reverse_iterator;
    using shape_type = size_t*;
    pystrides_adaptor() = default;
    pystrides_adaptor(const_pointer data, size_type size, shape_type shape);
    bool empty() const noexcept;
    size_type size() const noexcept;
    const_reference operator[](size_type i) const;
    const_reference front() const;
    const_reference back() const;
    const_iterator begin() const;
    const_iterator end() const;
    const_iterator cbegin() const;
    const_iterator cend() const;
    const_reverse_iterator rbegin() const;
    const_reverse_iterator rend() const;
    const_reverse_iterator crbegin() const;
    const_reverse_iterator crend() const;
private:
    // Non-owning pointers into the wrapped ndarray's metadata.
    const_pointer p_data;
    size_type m_size;
    shape_type p_shape;
};
/**********************************
* pystrides_iterator declaration *
**********************************/
// Random-access iterator over a pystrides_adaptor<N>: walks the byte
// strides and the shape in lockstep, exposing element strides
// (byte stride / N), with extent-1 dimensions reporting 0 like
// pystrides_adaptor::operator[] does.
template <std::size_t N>
class pystrides_iterator
{
public:
    using self_type = pystrides_iterator<N>;
    using value_type = typename pystrides_adaptor<N>::value_type;
    using pointer = typename pystrides_adaptor<N>::const_pointer;
    using reference = typename pystrides_adaptor<N>::const_reference;
    using difference_type = typename pystrides_adaptor<N>::difference_type;
    using iterator_category = std::random_access_iterator_tag;
    using shape_pointer = typename pystrides_adaptor<N>::shape_type;
    pystrides_iterator() = default;
    inline pystrides_iterator(pointer current, shape_pointer shape)
        : p_current(current)
        , p_shape(shape)
    {
    }
    inline reference operator*() const
    {
        // Extent-1 dimensions report a stride of 0; otherwise convert the
        // byte stride to an element stride.
        return *p_shape == size_t(1) ? 0 : *p_current / N;
    }
    inline pointer operator->() const
    {
        // WARNING: returns the address of a local temporary, which dangles
        // as soon as this call returns (undefined behavior if dereferenced).
        value_type res = this->operator*();
        return &res;
    }
    inline reference operator[](difference_type n) const
    {
        // Apply the same extent-1 rule as operator* and
        // pystrides_adaptor::operator[]; the previous implementation
        // omitted the shape check and returned the raw divided stride.
        return *(p_shape + n) == size_t(1) ? 0 : *(p_current + n) / N;
    }
    inline self_type& operator++()
    {
        ++p_current;
        ++p_shape;
        return *this;
    }
    inline self_type& operator--()
    {
        --p_current;
        --p_shape;
        return *this;
    }
    inline self_type operator++(int)
    {
        self_type tmp(*this);
        ++p_current;
        ++p_shape;
        return tmp;
    }
    inline self_type operator--(int)
    {
        self_type tmp(*this);
        --p_current;
        --p_shape;
        return tmp;
    }
    inline self_type& operator+=(difference_type n)
    {
        p_current += n;
        p_shape += n;
        return *this;
    }
    inline self_type& operator-=(difference_type n)
    {
        p_current -= n;
        p_shape -= n;
        return *this;
    }
    inline self_type operator+(difference_type n) const
    {
        return self_type(p_current + n, p_shape + n);
    }
    inline self_type operator-(difference_type n) const
    {
        return self_type(p_current - n, p_shape - n);
    }
    inline difference_type operator-(const self_type& rhs) const
    {
        // Distance between two iterators (removed an unused local copy).
        return p_current - rhs.p_current;
    }
    pointer get_pointer() const { return p_current; }
private:
    pointer p_current;
    shape_pointer p_shape;
};
// Comparison operators, all expressed through the iterators' current
// pointers. The derived operators (<=, >, >=) are written in terms of
// operator<, which yields the same truth table as the spelled-out forms.
template <std::size_t N>
inline bool operator==(const pystrides_iterator<N>& lhs,
                       const pystrides_iterator<N>& rhs)
{
    return lhs.get_pointer() == rhs.get_pointer();
}
template <std::size_t N>
inline bool operator!=(const pystrides_iterator<N>& lhs,
                       const pystrides_iterator<N>& rhs)
{
    return lhs.get_pointer() != rhs.get_pointer();
}
template <std::size_t N>
inline bool operator<(const pystrides_iterator<N>& lhs,
                      const pystrides_iterator<N>& rhs)
{
    return lhs.get_pointer() < rhs.get_pointer();
}
template <std::size_t N>
inline bool operator<=(const pystrides_iterator<N>& lhs,
                       const pystrides_iterator<N>& rhs)
{
    return !(rhs < lhs);
}
template <std::size_t N>
inline bool operator>(const pystrides_iterator<N>& lhs,
                      const pystrides_iterator<N>& rhs)
{
    return rhs < lhs;
}
template <std::size_t N>
inline bool operator>=(const pystrides_iterator<N>& lhs,
                       const pystrides_iterator<N>& rhs)
{
    return rhs <= lhs;
}
/************************************
* pystrides_adaptor implementation *
************************************/
// Adapt a raw numpy strides buffer of `size` entries; `shape` points at the
// matching shape buffer so singleton axes can be detected on access.
template <std::size_t N>
inline pystrides_adaptor<N>::pystrides_adaptor(const_pointer data, size_type size, shape_type shape)
: p_data(data), m_size(size), p_shape(shape)
{
}
template <std::size_t N>
inline bool pystrides_adaptor<N>::empty() const noexcept
{
return m_size == 0;
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::size() const noexcept -> size_type
{
return m_size;
}
// Element access with the same conversion as the iterator: singleton axes
// report stride 0, other axes report the raw stride scaled down by N.
template <std::size_t N>
inline auto pystrides_adaptor<N>::operator[](size_type i) const -> const_reference
{
return p_shape[i] == size_t(1) ? 0 : p_data[i] / N;
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::front() const -> const_reference
{
return this->operator[](0);
}
// Precondition: non-empty — m_size - 1 underflows otherwise.
template <std::size_t N>
inline auto pystrides_adaptor<N>::back() const -> const_reference
{
return this->operator[](m_size - 1);
}
// Iterator accessors. All iteration is const: the adaptor is a read-only
// view over the numpy-owned strides buffer.
template <std::size_t N>
inline auto pystrides_adaptor<N>::begin() const -> const_iterator
{
return cbegin();
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::end() const -> const_iterator
{
return cend();
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::cbegin() const -> const_iterator
{
return const_iterator(p_data, p_shape);
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::cend() const -> const_iterator
{
return const_iterator(p_data + m_size, p_shape + m_size);
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::rbegin() const -> const_reverse_iterator
{
return crbegin();
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::rend() const -> const_reverse_iterator
{
return crend();
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::crbegin() const -> const_reverse_iterator
{
return const_reverse_iterator(cend());
}
template <std::size_t N>
inline auto pystrides_adaptor<N>::crend() const -> const_reverse_iterator
{
return const_reverse_iterator(cbegin());
}
}
#endif
================================================
FILE: include/xtensor-python/pytensor.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PY_TENSOR_HPP
#define PY_TENSOR_HPP
#include <algorithm>
#include <array>
#include <cstddef>
#include "xtensor/containers/xbuffer_adaptor.hpp"
#include "xtensor/core/xiterator.hpp"
#include "xtensor/core/xsemantic.hpp"
#include "xtensor/utils/xutils.hpp"
#include "pycontainer.hpp"
#include "pystrides_adaptor.hpp"
#include "pynative_casters.hpp"
#include "xtensor_type_caster_base.hpp"
#include "xtensor_python_config.hpp"
namespace xt
{
template <class T, std::size_t N, layout_type L = layout_type::dynamic>
class pytensor;
}
namespace pybind11
{
namespace detail
{
#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3
// Legacy (pybind11 < 2.3) signature-name hook: renders pytensor parameters as
// "numpy.ndarray[dtype]" in generated docstrings and error messages.
template <class T, std::size_t N, xt::layout_type L>
struct handle_type_name<xt::pytensor<T, N, L>>
{
static PYBIND11_DESCR name()
{
return _("numpy.ndarray[") + npy_format_descriptor<T>::name() + _("]");
}
};
#endif
// pybind11 caster: converts Python ndarrays to/from xt::pytensor.
template <class T, std::size_t N, xt::layout_type L>
struct pyobject_caster<xt::pytensor<T, N, L>>
{
using type = xt::pytensor<T, N, L>;
// Python -> C++: without `convert`, require an exact dtype match first;
// type::ensure may still throw (e.g. wrong number of dimensions), which is
// reported as a failed conversion rather than propagated.
bool load(handle src, bool convert)
{
if (!convert)
{
if (!xt::detail::check_array<T>(src))
{
return false;
}
}
try
{
value = type::ensure(src);
}
catch (const std::runtime_error&)
{
return false;
}
return static_cast<bool>(value);
}
// C++ -> Python: a pytensor already wraps a Python object, so casting is
// just handing out a new reference to it.
static handle cast(const handle& src, return_value_policy, handle)
{
return src.inc_ref();
}
#ifdef PYBIND11_DESCR // The macro is removed from pybind11 since 2.3
PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name());
#else
PYBIND11_TYPE_CASTER(type, _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]"));
#endif
};
// Type caster for casting ndarray to xexpression<pytensor>: reuses the
// pyobject_caster above and only adds implicit conversions to the
// xexpression base reference expected by generic xtensor APIs.
template <class T, std::size_t N, xt::layout_type L>
struct type_caster<xt::xexpression<xt::pytensor<T, N, L>>> : pyobject_caster<xt::pytensor<T, N, L>>
{
using Type = xt::xexpression<xt::pytensor<T, N, L>>;
operator Type&()
{
return this->value;
}
operator const Type&()
{
return this->value;
}
};
} // namespace detail
}
namespace xt
{
namespace detail {
// Fixed-size scratch buffer for strides handed to the NumPy C API
// (npy_intp units, i.e. byte strides).
template <std::size_t N>
struct numpy_strides
{
npy_intp value[N];
};
// Rank-0 specialization: zero-length arrays are ill-formed, so expose a
// null pointer instead (accepted by PyArray_NewFromDescr).
template <>
struct numpy_strides<0>
{
npy_intp* value = nullptr;
};
} // namespace detail
// xtensor trait plumbing: declares the iterator types for pytensor.
template <class T, std::size_t N, layout_type L>
struct xiterable_inner_types<pytensor<T, N, L>>
: xcontainer_iterable_types<pytensor<T, N, L>>
{
};
// xtensor trait plumbing: storage is a non-owning buffer adaptor over the
// numpy-owned data, and shape/strides are stack-allocated std::arrays of
// the static rank N (this is the performance advantage over pyarray).
template <class T, std::size_t N, layout_type L>
struct xcontainer_inner_types<pytensor<T, N, L>>
{
using storage_type = xbuffer_adaptor<T*>;
using reference = typename storage_type::reference;
using const_reference = typename storage_type::const_reference;
using size_type = typename storage_type::size_type;
using shape_type = std::array<npy_intp, N>;
using strides_type = shape_type;
using backstrides_type = shape_type;
using inner_shape_type = shape_type;
using inner_strides_type = strides_type;
using inner_backstrides_type = backstrides_type;
using temporary_type = pytensor<T, N, L>;
static constexpr layout_type layout = L;
};
/**
* @class pytensor
* @brief Multidimensional container providing the xtensor container semantics wrapping a numpy array.
*
* pytensor is similar to the xtensor container in that it has a static dimensionality.
*
* Unlike the pyarray container, pytensor cannot be reshaped with a different number of dimensions
* and reshapes are not reflected on the Python side. However, pytensor has benefits compared to pyarray
* in terms of performances. pytensor shapes are stack-allocated which makes iteration upon pytensor
* faster than with pyarray.
*
* @tparam T The type of the element stored in the pyarray.
* @sa pyarray
*/
template <class T, std::size_t N, layout_type L>
class pytensor : public pycontainer<pytensor<T, N, L>>,
public xcontainer_semantic<pytensor<T, N, L>>
{
public:
using self_type = pytensor<T, N, L>;
using semantic_base = xcontainer_semantic<self_type>;
using base_type = pycontainer<self_type>;
using storage_type = typename base_type::storage_type;
using value_type = typename base_type::value_type;
using reference = typename base_type::reference;
using const_reference = typename base_type::const_reference;
using pointer = typename base_type::pointer;
using size_type = typename base_type::size_type;
using shape_type = typename base_type::shape_type;
using strides_type = typename base_type::strides_type;
using backstrides_type = typename base_type::backstrides_type;
using inner_shape_type = typename base_type::inner_shape_type;
using inner_strides_type = typename base_type::inner_strides_type;
using inner_backstrides_type = typename base_type::inner_backstrides_type;
// Static number of dimensions of the tensor.
constexpr static std::size_t rank = N;
// Constructors: default, nested initializer list, wrapping an existing
// Python object (borrowed/stolen handle), and shape/strides allocation.
pytensor();
pytensor(nested_initializer_list_t<T, N> t);
pytensor(pybind11::handle h, pybind11::object::borrowed_t);
pytensor(pybind11::handle h, pybind11::object::stolen_t);
pytensor(const pybind11::object& o);
explicit pytensor(const shape_type& shape, layout_type l = layout_type::row_major);
explicit pytensor(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);
explicit pytensor(const shape_type& shape, const strides_type& strides, const_reference value);
explicit pytensor(const shape_type& shape, const strides_type& strides);
template <class S = shape_type>
static pytensor from_shape(S&& shape);
pytensor(const self_type& rhs);
self_type& operator=(const self_type& rhs);
pytensor(self_type&&) = default;
self_type& operator=(self_type&& e) = default;
template <class E>
pytensor(const xexpression<E>& e);
template <class E>
self_type& operator=(const xexpression<E>& e);
using base_type::begin;
using base_type::end;
// pybind11-style helpers: ensure converts/validates a handle, check_ tests
// whether a handle is an acceptable ndarray.
static self_type ensure(pybind11::handle h);
static bool check_(pybind11::handle h);
#if (PYBIND11_VERSION_MAJOR == 2 && PYBIND11_VERSION_MINOR >= 3) || (PYBIND11_VERSION_MAJOR >= 3)
// Prevent ambiguous overload resolution for operators defined for
// both xt::xcontainer_semantic and pybind11::object.
using semantic_base::operator+=;
using semantic_base::operator-=;
using semantic_base::operator*=;
using semantic_base::operator/=;
using semantic_base::operator|=;
using semantic_base::operator&=;
using semantic_base::operator^=;
// using semantic_base::operator<<=;
// using semantic_base::operator>>=;
#endif
private:
// Shape/strides/backstrides mirror the numpy array metadata in
// stack-allocated arrays; m_storage is a non-owning view of its data.
inner_shape_type m_shape;
inner_strides_type m_strides;
inner_backstrides_type m_backstrides;
storage_type m_storage;
void init_tensor(const shape_type& shape, const strides_type& strides);
void init_from_python();
inner_shape_type& shape_impl() noexcept;
const inner_shape_type& shape_impl() const noexcept;
inner_strides_type& strides_impl() noexcept;
const inner_strides_type& strides_impl() const noexcept;
inner_backstrides_type& backstrides_impl() noexcept;
const inner_backstrides_type& backstrides_impl() const noexcept;
storage_type& storage_impl() noexcept;
const storage_type& storage_impl() const noexcept;
friend class xcontainer<pytensor<T, N, L>>;
friend class pycontainer<pytensor<T, N, L>>;
};
/***************************
* pytensor implementation *
***************************/
/**
* @name Constructors
*/
//@{
/**
* Allocates an uninitialized pytensor that holds 1 element.
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor()
: base_type()
{
// Shape of all 1s with zero strides: a single (uninitialized) element.
m_shape = xtl::make_sequence<shape_type>(N, size_type(1));
m_strides = xtl::make_sequence<strides_type>(N, size_type(0));
init_tensor(m_shape, m_strides);
detail::default_initialize(m_storage);
}
/**
* Allocates a pytensor with a nested initializer list.
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(nested_initializer_list_t<T, N> t)
: base_type()
{
base_type::resize(xt::shape<shape_type>(t), layout_type::row_major);
nested_copy(m_storage.begin(), t);
}
// The three constructors below wrap an already-existing Python object
// (borrowed or stolen reference) and read shape/strides back from it.
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(pybind11::handle h, pybind11::object::borrowed_t b)
: base_type(h, b)
{
init_from_python();
}
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(pybind11::handle h, pybind11::object::stolen_t s)
: base_type(h, s)
{
init_from_python();
}
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const pybind11::object& o)
: base_type(o)
{
init_from_python();
}
/**
* Allocates an uninitialized pytensor with the specified shape and
* layout.
* @param shape the shape of the pytensor
* @param l the layout_type of the pytensor
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const shape_type& shape, layout_type l)
{
// compute_strides fills m_strides in place before the numpy array is built.
compute_strides(shape, l, m_strides);
init_tensor(shape, m_strides);
}
/**
* Allocates a pytensor with the specified shape and layout. Elements
* are initialized to the specified value.
* @param shape the shape of the pytensor
* @param value the value of the elements
* @param l the layout_type of the pytensor
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const shape_type& shape,
const_reference value,
layout_type l)
{
compute_strides(shape, l, m_strides);
init_tensor(shape, m_strides);
std::fill(m_storage.begin(), m_storage.end(), value);
}
/**
* Allocates a pytensor with the specified shape and strides.
* Elements are initialized to the specified value.
* @param shape the shape of the pytensor
* @param strides the strides of the pytensor
* @param value the value of the elements
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const shape_type& shape,
const strides_type& strides,
const_reference value)
{
init_tensor(shape, strides);
std::fill(m_storage.begin(), m_storage.end(), value);
}
/**
* Allocates an uninitialized pytensor with the specified shape and strides.
* @param shape the shape of the pytensor
* @param strides the strides of the pytensor
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const shape_type& shape,
const strides_type& strides)
{
init_tensor(shape, strides);
}
/**
* Allocates and returns an pytensor with the specified shape.
* @param shape the shape of the pytensor
*/
template <class T, std::size_t N, layout_type L>
template <class S>
inline pytensor<T, N, L> pytensor<T, N, L>::from_shape(S&& shape)
{
// Rejects shapes whose length does not match the static rank N.
detail::check_dims<shape_type>::run(shape.size());
auto shp = xtl::forward_sequence<shape_type, S>(shape);
return self_type(shp);
}
//@}
/**
* @name Copy semantic
*/
//@{
/**
* The copy constructor.
*/
template <class T, std::size_t N, layout_type L>
inline pytensor<T, N, L>::pytensor(const self_type& rhs)
: base_type(), semantic_base(rhs)
{
// Deep copy: allocates a fresh numpy array and copies the element buffer
// (the new tensor does NOT share data with rhs).
init_tensor(rhs.shape(), rhs.strides());
std::copy(rhs.storage().cbegin(), rhs.storage().cend(), this->storage().begin());
}
/**
* The assignment operator, implemented as copy-and-move for strong
* exception safety.
*/
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::operator=(const self_type& rhs) -> self_type&
{
self_type tmp(rhs);
*this = std::move(tmp);
return *this;
}
//@}
/**
* @name Extended copy semantic
*/
//@{
/**
* The extended copy constructor.
*/
template <class T, std::size_t N, layout_type L>
template <class E>
inline pytensor<T, N, L>::pytensor(const xexpression<E>& e)
: base_type()
{
// Materializes the expression: allocate a row-major numpy array of the
// expression's shape, then evaluate the expression into it.
shape_type shape = xtl::forward_sequence<shape_type, decltype(e.derived_cast().shape())>(e.derived_cast().shape());
strides_type strides = xtl::make_sequence<strides_type>(N, size_type(0));
compute_strides(shape, layout_type::row_major, strides);
init_tensor(shape, strides);
semantic_base::assign(e);
}
/**
* The extended assignment operator.
*/
template <class T, std::size_t N, layout_type L>
template <class E>
inline auto pytensor<T, N, L>::operator=(const xexpression<E>& e) -> self_type&
{
return semantic_base::operator=(e);
}
//@}
// pybind11-style helpers, forwarded to pycontainer: ensure converts a handle
// to a pytensor (may throw on mismatch), check_ tests acceptability.
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::ensure(pybind11::handle h) -> self_type
{
return base_type::ensure(h);
}
template <class T, std::size_t N, layout_type L>
inline bool pytensor<T, N, L>::check_(pybind11::handle h)
{
return base_type::check_(h);
}
// Allocates the backing numpy array for the given shape/strides and points
// this tensor's metadata and storage at it.
template <class T, std::size_t N, layout_type L>
inline void pytensor<T, N, L>::init_tensor(const shape_type& shape, const strides_type& strides)
{
// NumPy expects byte strides; ours are element strides.
detail::numpy_strides<N> python_strides;
std::transform(strides.begin(), strides.end(), python_strides.value,
[](auto v) { return sizeof(T) * v; });
int flags = NPY_ARRAY_ALIGNED;
if (!std::is_const<T>::value)
{
flags |= NPY_ARRAY_WRITEABLE;
}
// PyArray_NewFromDescr steals the dtype reference, hence release().
auto dtype = pybind11::detail::npy_format_descriptor<T>::dtype();
auto tmp = pybind11::reinterpret_steal<pybind11::object>(
PyArray_NewFromDescr(&PyArray_Type, (PyArray_Descr*) dtype.release().ptr(), static_cast<int>(shape.size()),
const_cast<npy_intp*>(shape.data()), python_strides.value,
nullptr, flags, nullptr));
if (!tmp)
{
throw std::runtime_error("NumPy: unable to create ndarray");
}
// Transfer ownership of the new array to this object, then mirror its
// metadata and wrap its data buffer (non-owning).
this->m_ptr = tmp.release().ptr();
m_shape = shape;
m_strides = strides;
adapt_strides(m_shape, m_strides, m_backstrides);
m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),
static_cast<size_type>(PyArray_SIZE(this->python_array())));
}
// Reads shape/strides back from the wrapped Python array and validates them
// against the static rank N and the compile-time layout L.
template <class T, std::size_t N, layout_type L>
inline void pytensor<T, N, L>::init_from_python()
{
// A null handle (e.g. failed ensure) leaves the tensor empty.
if (!static_cast<bool>(*this))
{
return;
}
if (PyArray_NDIM(this->python_array()) != N)
{
throw std::runtime_error("NumPy: ndarray has incorrect number of dimensions");
}
// Convert numpy byte strides to element strides. NOTE(review): divides by
// sizeof(T), so this assumes the array's itemsize equals sizeof(T) —
// presumably guaranteed by the dtype check done at ensure() time.
std::copy(PyArray_DIMS(this->python_array()), PyArray_DIMS(this->python_array()) + N, m_shape.begin());
std::transform(PyArray_STRIDES(this->python_array()), PyArray_STRIDES(this->python_array()) + N, m_strides.begin(),
[](auto v) { return v / sizeof(T); });
adapt_strides(m_shape, m_strides, m_backstrides);
if (L != layout_type::dynamic && !do_strides_match(m_shape, m_strides, L, 1))
{
throw std::runtime_error("NumPy: passing container with bad strides for layout (is it a view?).");
}
m_storage = storage_type(reinterpret_cast<pointer>(PyArray_DATA(this->python_array())),
this->get_buffer_size());
}
// CRTP hooks required by xcontainer: expose the metadata/storage members.
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::shape_impl() noexcept -> inner_shape_type&
{
return m_shape;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::shape_impl() const noexcept -> const inner_shape_type&
{
return m_shape;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::strides_impl() noexcept -> inner_strides_type&
{
return m_strides;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::strides_impl() const noexcept -> const inner_strides_type&
{
return m_strides;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::backstrides_impl() noexcept -> inner_backstrides_type&
{
return m_backstrides;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::backstrides_impl() const noexcept -> const inner_backstrides_type&
{
return m_backstrides;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::storage_impl() noexcept -> storage_type&
{
return m_storage;
}
template <class T, std::size_t N, layout_type L>
inline auto pytensor<T, N, L>::storage_impl() const noexcept -> const storage_type&
{
return m_storage;
}
}
#endif
================================================
FILE: include/xtensor-python/pyvectorize.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef PY_VECTORIZE_HPP
#define PY_VECTORIZE_HPP
#include <type_traits>
#include "pyarray.hpp"
#include "xtensor/core/xvectorize.hpp"
namespace xt
{
// Wraps xt::xvectorizer so a scalar function can be applied elementwise to
// pyarray arguments, returning a new pyarray.
template <class Func, class R, class... Args>
struct pyvectorizer
{
xvectorizer<Func, R> m_vectorizer;
// SFINAE guard keeps this constructor from hijacking copy construction.
template <class F, class = std::enable_if_t<!std::is_same<std::decay_t<F>, pyvectorizer>::value>>
pyvectorizer(F&& func)
: m_vectorizer(std::forward<F>(func))
{
}
// Apply the vectorized function; the result is materialized as pyarray<R>.
inline pyarray<R> operator()(const pyarray<Args>&... args) const
{
pyarray<R> res = m_vectorizer(args...);
return res;
}
};
/**
* @brief Create numpy universal function from scalar function.
*/
template <class R, class... Args>
inline pyvectorizer<R (*)(Args...), R, Args...> pyvectorize(R (*f)(Args...))
{
return pyvectorizer<R (*)(Args...), R, Args...>(f);
}
/// @cond DOXYGEN_INCLUDE_OVERLOADS
// Overload for callables (lambdas, functors): the second argument is a
// never-used function-pointer tag that carries the R(Args...) signature.
template <class F, class R, class... Args>
inline pyvectorizer<F, R, Args...> pyvectorize(F&& f, R (*)(Args...))
{
return pyvectorizer<F, R, Args...>(std::forward<F>(f));
}
// Deduces the callable's signature via detail::get_function_type and
// dispatches to the tagged overload above.
template <class F>
inline auto pyvectorize(F&& f) -> decltype(pyvectorize(std::forward<F>(f), (detail::get_function_type<F>*)nullptr))
{
return pyvectorize(std::forward<F>(f), (detail::get_function_type<F>*)nullptr);
}
/// @endcond
}
#endif
================================================
FILE: include/xtensor-python/xtensor_python_config.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef XTENSOR_PYTHON_CONFIG_HPP
#define XTENSOR_PYTHON_CONFIG_HPP
#define XTENSOR_PYTHON_VERSION_MAJOR 0
#define XTENSOR_PYTHON_VERSION_MINOR 29
#define XTENSOR_PYTHON_VERSION_PATCH 0
#endif
================================================
FILE: include/xtensor-python/xtensor_type_caster_base.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef XTENSOR_TYPE_CASTER_HPP
#define XTENSOR_TYPE_CASTER_HPP
#include <cstddef>
#include <algorithm>
#include <vector>
#include "xtensor/containers/xtensor.hpp"
#include "xtensor/containers/xfixed.hpp"
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
namespace pybind11
{
namespace detail
{
// Converts an arbitrary Python object to a contiguous array_t of T.
// Primary template requests C order; forcecast allows dtype conversion.
template <typename T, xt::layout_type L>
struct pybind_array_getter_impl
{
static auto run(handle src)
{
return array_t<T, array::c_style | array::forcecast>::ensure(src);
}
};
// Column-major targets request Fortran order instead.
template <typename T>
struct pybind_array_getter_impl<T, xt::layout_type::column_major>
{
static auto run(handle src)
{
return array_t<T, array::f_style | array::forcecast>::ensure(src);
}
};
// Maps an xtensor expression type to the appropriate array_t conversion.
// The unspecialized template is intentionally empty: unsupported types fail
// at compile time.
template <class T>
struct pybind_array_getter
{
};
template <class T, xt::layout_type L>
struct pybind_array_getter<xt::xarray<T, L>>
{
static auto run(handle src)
{
return pybind_array_getter_impl<T, L>::run(src);
}
};
template <class T, std::size_t N, xt::layout_type L>
struct pybind_array_getter<xt::xtensor<T, N, L>>
{
static auto run(handle src)
{
return pybind_array_getter_impl<T, L>::run(src);
}
};
template <class T, class FSH, xt::layout_type L>
struct pybind_array_getter<xt::xtensor_fixed<T, FSH, L>>
{
static auto run(handle src)
{
return pybind_array_getter_impl<T, L>::run(src);
}
};
// Views cannot be loaded from Python; returning false makes load() bail out.
template <class CT, class S, xt::layout_type L, class FST>
struct pybind_array_getter<xt::xstrided_view<CT, S, L, FST>>
{
static auto run(handle /*src*/)
{
return false;
}
};
// NOTE(review): EC is passed as the element type here, unlike the other
// specializations which extract T — presumably adaptors are instantiated
// with EC being the element type in this code path; confirm against callers.
template <class EC, xt::layout_type L, class SC, class Tag>
struct pybind_array_getter<xt::xarray_adaptor<EC, L, SC, Tag>>
{
static auto run(handle src)
{
auto buf = pybind_array_getter_impl<EC, L>::run(src);
return buf;
}
};
// Tensor adaptors cannot be loaded from Python either.
template <class EC, std::size_t N, xt::layout_type L, class Tag>
struct pybind_array_getter<xt::xtensor_adaptor<EC, N, L, Tag>>
{
static auto run(handle /*src*/)
{
return false;
}
};
// Dimension check: dynamic-rank targets accept any ndim (primary template);
// static-rank targets require an exact match.
template <class T>
struct pybind_array_dim_checker
{
template <class B>
static bool run(const B& /*buf*/)
{
return true;
}
};
template <class T, std::size_t N, xt::layout_type L>
struct pybind_array_dim_checker<xt::xtensor<T, N, L>>
{
template <class B>
static bool run(const B& buf)
{
return buf.ndim() == N;
}
};
template <class T, class FSH, xt::layout_type L>
struct pybind_array_dim_checker<xt::xtensor_fixed<T, FSH, L>>
{
template <class B>
static bool run(const B& buf)
{
return buf.ndim() == FSH::size();
}
};
// Shape check: only xtensor_fixed pins exact extents; everything else passes.
template <class T>
struct pybind_array_shape_checker
{
template <class B>
static bool run(const B& /*buf*/)
{
return true;
}
};
template <class T, class FSH, xt::layout_type L>
struct pybind_array_shape_checker<xt::xtensor_fixed<T, FSH, L>>
{
template <class B>
static bool run(const B& buf)
{
// Compare the fixed compile-time shape against the incoming buffer.
auto shape = FSH();
return std::equal(shape.begin(), shape.end(), buf.shape());
}
};
// Casts a strided expression type to numpy array. If given a base,
// the numpy array references the src data, otherwise it'll make a copy.
// The writeable attribute lets you specify the writeable flag for the array.
template <typename Type>
handle xtensor_array_cast(const Type& src, handle base = handle(), bool writeable = true)
{
// TODO: make use of xt::pyarray instead of array.
// pybind11::array expects byte strides; scale the element strides up.
std::vector<std::size_t> python_strides(src.strides().size());
std::transform(src.strides().begin(), src.strides().end(),
python_strides.begin(), [](auto v) {
return sizeof(typename Type::value_type) * v;
});
std::vector<std::size_t> python_shape(src.shape().size());
std::copy(src.shape().begin(), src.shape().end(), python_shape.begin());
array a(python_shape, python_strides, &*(src.begin()), base);
// Clear the writeable flag directly on the PyArrayObject when requested.
if (!writeable)
{
array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return a.release();
}
// Takes an lvalue ref to some strided expression type and a (python) base object, creating a numpy array that
// references the expression object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename Type, typename CType>
handle xtensor_ref_array(CType& src, handle parent = none())
{
return xtensor_array_cast<Type>(src, parent, !std::is_const<CType>::value);
}
// Takes a pointer to a strided expression, builds a capsule around it, then returns a numpy
// array that references the encapsulated data with a python-side reference to the capsule to tie
// its destruction to that of any dependent python objects. Const-ness is determined by whether or
// not the CType of the pointer given is const.
template <typename Type, typename CType>
handle xtensor_encapsulate(CType* src)
{
// The capsule destructor deletes the expression when Python drops it.
capsule base(src, [](void* o) { delete static_cast<CType*>(o); });
return xtensor_ref_array<Type>(*src, base);
}
// Base class of type_caster for strided expressions: implements Python->C++
// loading (always by copy) and C++->Python casting for every return category.
template <class Type>
struct xtensor_type_caster_base
{
private:
// Cast implementation: maps each return_value_policy to one of the helper
// strategies above (encapsulate = Python owns, ref = borrow, cast = copy).
template <typename CType>
static handle cast_impl(CType* src, return_value_policy policy, handle parent)
{
switch (policy)
{
case return_value_policy::take_ownership:
case return_value_policy::automatic:
return xtensor_encapsulate<Type>(src);
case return_value_policy::move:
return xtensor_encapsulate<Type>(new CType(std::move(*src)));
case return_value_policy::copy:
return xtensor_array_cast<Type>(*src);
case return_value_policy::reference:
case return_value_policy::automatic_reference:
return xtensor_ref_array<Type>(*src);
case return_value_policy::reference_internal:
return xtensor_ref_array<Type>(*src, parent);
default:
throw cast_error("unhandled return_value_policy: should not happen!");
};
}
public:
PYBIND11_TYPE_CASTER(Type, _("numpy.ndarray[") + npy_format_descriptor<typename Type::value_type>::name + _("]"));
// Python -> C++: convert to a contiguous buffer (getter), validate rank
// and shape, then copy element-wise into a freshly allocated expression.
bool load(handle src, bool convert)
{
using T = typename Type::value_type;
if (!convert && !array_t<T>::check_(src))
{
return false;
}
auto buf = pybind_array_getter<Type>::run(src);
if (!buf)
{
return false;
}
if (!pybind_array_dim_checker<Type>::run(buf))
{
return false;
}
if (!pybind_array_shape_checker<Type>::run(buf))
{
return false;
}
std::vector<size_t> shape(buf.ndim());
std::copy(buf.shape(), buf.shape() + buf.ndim(), shape.begin());
value = Type::from_shape(shape);
std::copy(buf.data(), buf.data() + buf.size(), value.data());
return true;
}
// Normal returned non-reference, non-const value:
static handle cast(Type&& src, return_value_policy /* policy */, handle parent)
{
return cast_impl(&src, return_value_policy::move, parent);
}
// If you return a non-reference const, we mark the numpy array readonly:
static handle cast(const Type&& src, return_value_policy /* policy */, handle parent)
{
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type& src, return_value_policy policy, handle parent)
{
if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
{
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
// const lvalue reference return; default (automatic) becomes copy
// (dispatches through the const-pointer overload below).
static handle cast(const Type& src, return_value_policy policy, handle parent)
{
if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
{
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// non-const pointer return
static handle cast(Type* src, return_value_policy policy, handle parent)
{
return cast_impl(src, policy, parent);
}
// const pointer return
static handle cast(const Type* src, return_value_policy policy, handle parent)
{
return cast_impl(src, policy, parent);
}
};
}
}
#endif
================================================
FILE: readthedocs.yml
================================================
# Read the Docs build configuration (v2 schema).
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "mambaforge-22.9"
sphinx:
# Path to Sphinx configuration file
configuration: docs/source/conf.py
conda:
# Conda environment providing the documentation build dependencies.
environment: docs/environment.yml
================================================
FILE: test/CMakeLists.txt
================================================
############################################################################
# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay #
# Copyright (c) QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
cmake_minimum_required(VERSION 3.29)
# Standalone mode: when this directory is the top-level CMake project,
# locate the installed dependencies ourselves; otherwise the parent
# project is expected to have provided them.
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
project(xtensor-python-test)
find_package(pybind11 REQUIRED)
set(PYBIND11_INCLUDE_DIR ${pybind11_INCLUDE_DIRS})
find_package(xtensor REQUIRED CONFIG)
set(XTENSOR_INCLUDE_DIR ${xtensor_INCLUDE_DIRS})
find_package(xtensor-python REQUIRED CONFIG)
set(XTENSOR_PYTHON_INCLUDE_DIR ${xtensor-python_INCLUDE_DIRS})
endif ()
message(STATUS "Forcing tests build type to Release")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE)
include(CheckCXXCompilerFlag)
string(TOUPPER "${CMAKE_BUILD_TYPE}" U_CMAKE_BUILD_TYPE)
if(MSVC)
    # MSVC does not understand GCC-style -std flags (the original appended
    # -std=c++20 unconditionally); use the MSVC spelling instead.
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++20 /EHsc /MP /bigobj")
    set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO)
else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20")
endif()
# Acquire googletest: either download it or copy a local source tree at
# configure time, then build it as part of this project; otherwise fall
# back to a pre-installed GTest package.
if (DOWNLOAD_GTEST OR GTEST_SRC_DIR)
if(DOWNLOAD_GTEST)
# Download and unpack googletest at configure time
configure_file(downloadGTest.cmake.in googletest-download/CMakeLists.txt)
else()
# Copy local source of googletest at configure time
configure_file(copyGTest.cmake.in googletest-download/CMakeLists.txt)
endif()
# Configure and build the download/copy helper project immediately.
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
RESULT_VARIABLE result
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download )
if(result)
message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()
execute_process(COMMAND ${CMAKE_COMMAND} --build .
RESULT_VARIABLE result
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download )
if(result)
message(FATAL_ERROR "Build step for googletest failed: ${result}")
endif()
# Use the shared CRT on Windows so gtest links against the same runtime.
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
# Add googletest directly to our build. This defines
# the gtest and gtest_main targets.
add_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/googletest-src
${CMAKE_CURRENT_BINARY_DIR}/googletest-build EXCLUDE_FROM_ALL)
set(GTEST_INCLUDE_DIRS "${gtest_SOURCE_DIR}/include")
set(GTEST_BOTH_LIBRARIES gtest_main gtest)
else()
find_package(GTest REQUIRED)
endif()
# Test sources; main.cpp embeds the Python interpreter and runs gtest.
set(XTENSOR_PYTHON_TESTS
main.cpp
test_pyarray.cpp
test_pyarray_traits.cpp
test_pytensor.cpp
test_pyvectorize.cpp
test_sfinae.cpp
)
add_executable(test_xtensor_python ${XTENSOR_PYTHON_TESTS} ${XTENSOR_PYTHON_HEADERS})
target_link_libraries(test_xtensor_python xtensor-python ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${PYTHON_LIBRARIES})
if(DOWNLOAD_GTEST OR GTEST_SRC_DIR)
add_dependencies(test_xtensor_python gtest_main)
endif()
# Convenience target: `make xtest` builds and runs the suite.
add_custom_target(xtest COMMAND ./test_xtensor_python DEPENDS test_xtensor_python)
================================================
FILE: test/copyGTest.cmake.in
================================================
############################################################################
# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay #
# Copyright (c) QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
cmake_minimum_required(VERSION 3.29)
# Helper project: copies a local googletest source tree (GTEST_SRC_DIR) into
# the build directory at configure time; the parent adds it via
# add_subdirectory, so no configure/build/install steps are needed here.
project(googletest-download NONE)
include(ExternalProject)
ExternalProject_Add(googletest
URL "${GTEST_SRC_DIR}"
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src"
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
================================================
FILE: test/downloadGTest.cmake.in
================================================
############################################################################
# Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay #
# Copyright (c) QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
cmake_minimum_required(VERSION 3.29)
# Helper project: clones googletest (pinned tag) into the build directory at
# configure time; the parent adds it via add_subdirectory, so the
# configure/build/install steps are intentionally disabled.
project(googletest-download NONE)
include(ExternalProject)
ExternalProject_Add(googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG v1.16.0
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src"
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
================================================
FILE: test/main.cpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
// Required to avoid the error "std does not have member copysign"
#include <cmath>
#include "gtest/gtest.h"
#include <pybind11/embed.h>
#define FORCE_IMPORT_ARRAY
#include "xtensor-python/pyarray.hpp"
namespace py = pybind11;
// Test-suite entry point.  An embedded Python interpreter and the numpy
// C API are initialized before GoogleTest runs, because the pyarray /
// pytensor containers under test require a live interpreter.
int main(int argc, char* argv[])
{
    // The interpreter guard lives at function scope so it outlives every
    // test case executed by RUN_ALL_TESTS().
    py::scoped_interpreter guard{};
    xt::import_numpy();

    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
================================================
FILE: test/test_common.hpp
================================================
/***************************************************************************
* Copyright (c) Wolf Vollprecht, Johan Mabille and Sylvain Corlay *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef TEST_COMMON_HPP
#define TEST_COMMON_HPP
#include "xtensor/core/xlayout.hpp"
#include "xtensor/misc/xmanipulation.hpp"
#include "xtl/xsequence.hpp"
namespace xt
{
// Element-wise equality between xt::uvector and std::vector; the
// four-iterator std::equal overload also rejects size mismatches.
template <class T, class A>
bool operator==(const uvector<T, A>& lhs, const std::vector<T, A>& rhs)
{
    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
// Symmetric overload: std::vector on the left, xt::uvector on the right.
template <class T, class A>
bool operator==(const std::vector<T, A>& lhs, const uvector<T, A>& rhs)
{
    return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
}
// Reference fixture describing a 3 x 2 x 4 test array.  This base class only
// fills in the shape and the per-row assignment values; strides, backstrides,
// flat storage and layout are supplied by the derived *_result structs.
template <class C = std::vector<std::size_t>>
struct layout_result
{
    using vector_type = uvector<int>;
    using size_type = typename C::value_type;
    using shape_type = C;
    using strides_type = get_strides_t<shape_type>;
    using assigner_type = std::vector<std::vector<vector_type>>;

    inline layout_result()
    {
        m_shape = {3, 2, 4};
        m_assigner.resize(m_shape[0]);
        for (auto& row : m_assigner)
        {
            row.resize(m_shape[1]);
        }
        // Distinct values per (i, j, k) entry; the leading -1 makes the
        // first element easy to spot in storage-order checks.
        m_assigner[0][0] = {-1, 1, 2, 3};
        m_assigner[0][1] = {4, 5, 6, 7};
        m_assigner[1][0] = {8, 9, 10, 11};
        m_assigner[1][1] = {12, 13, 14, 15};
        m_assigner[2][0] = {16, 17, 18, 19};
        m_assigner[2][1] = {20, 21, 22, 23};
    }

    shape_type m_shape;
    strides_type m_strides;
    strides_type m_backstrides;
    vector_type m_data;
    layout_type m_layout;
    assigner_type m_assigner;

    // Accessors mirroring the container interface so the fixture can be fed
    // to compare_shape() alongside a real container.
    inline size_type size() const { return m_data.size(); }
    inline const shape_type& shape() const { return m_shape; }
    inline const strides_type& strides() const { return m_strides; }
    inline const strides_type& backstrides() const { return m_backstrides; }
    inline layout_type layout() const { return m_layout; }
    inline const vector_type& data() const { return m_data; }
};
// Expected strides, backstrides and storage order for a C-ordered
// (row-major) 3 x 2 x 4 array.
template <class C = std::vector<std::size_t>>
struct row_major_result : layout_result<C>
{
    inline row_major_result()
    {
        this->m_layout = layout_type::row_major;
        this->m_strides = {8, 4, 1};
        this->m_backstrides = {16, 4, 3};
        this->m_data = {-1, 1, 2, 3, 4, 5, 6, 7,
                        8, 9, 10, 11, 12, 13, 14, 15,
                        16, 17, 18, 19, 20, 21, 22, 23};
    }
};
// Expected strides, backstrides and storage order for a Fortran-ordered
// (column-major) 3 x 2 x 4 array.
template <class C = std::vector<std::size_t>>
struct column_major_result : layout_result<C>
{
    inline column_major_result()
    {
        this->m_layout = layout_type::column_major;
        this->m_strides = {1, 3, 6};
        this->m_backstrides = {2, 3, 18};
        this->m_data = {-1, 8, 16, 4, 12, 20,
                        1, 9, 17, 5, 13, 21,
                        2, 10, 18, 6, 14, 22,
                        3, 11, 19, 7, 15, 23};
    }
};
// Expected metadata for a custom stride ordering that is neither row- nor
// column-major, hence the dynamic layout tag.
template <class C = std::vector<std::size_t>>
struct central_major_result : layout_result<C>
{
    inline central_major_result()
    {
        this->m_layout = layout_type::dynamic;
        this->m_strides = {8, 1, 2};
        this->m_backstrides = {16, 1, 6};
        this->m_data = {-1, 4, 1, 5, 2, 6, 3, 7,
                        8, 12, 9, 13, 10, 14, 11, 15,
                        16, 20, 17, 21, 18, 22, 19, 23};
    }
};
// Fixture for a shape with a unit dimension (3 x 1 x 4).  The singleton axis
// carries a zero stride and the fixture reports a dynamic layout.
template <class C = std::vector<std::size_t>>
struct unit_shape_result
{
    using vector_type = std::vector<int>;
    using size_type = typename C::value_type;
    using shape_type = C;
    using strides_type = C;
    using assigner_type = std::vector<std::vector<vector_type>>;

    inline unit_shape_result()
    {
        m_shape = {3, 1, 4};
        m_strides = {4, 0, 1};
        m_backstrides = {8, 0, 3};
        m_data = {-1, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19};
        m_layout = layout_type::dynamic;
        m_assigner.resize(m_shape[0]);
        for (auto& row : m_assigner)
        {
            row.resize(m_shape[1]);
        }
        m_assigner[0][0] = {-1, 1, 2, 3};
        m_assigner[1][0] = {8, 9, 10, 11};
        m_assigner[2][0] = {16, 17, 18, 19};
    }

    shape_type m_shape;
    strides_type m_strides;
    strides_type m_backstrides;
    vector_type m_data;
    layout_type m_layout;
    assigner_type m_assigner;

    // Same accessor surface as layout_result, so compare_shape() accepts
    // either fixture type.
    inline size_type size() const { return m_data.size(); }
    inline const shape_type& shape() const { return m_shape; }
    inline const strides_type& strides() const { return m_strides; }
    inline const strides_type& backstrides() const { return m_backstrides; }
    inline layout_type layout() const { return m_layout; }
    inline const vector_type& data() const { return m_data; }
};
// Checks that `vec` reports the same shape, strides, backstrides, size and
// (optionally) layout as the reference fixture `result`.
//
// Parameters:
//   vec            - container under test (pyarray / pytensor in these tests)
//   result         - reference fixture (layout_result / unit_shape_result)
//   compare_layout - when false, the layout check is skipped (used for
//                    shapes whose effective layout differs from the fixture)
template <class V, class R>
void compare_shape(V& vec, const R& result, bool compare_layout = true)
{
    EXPECT_TRUE(std::equal(vec.shape().cbegin(), vec.shape().cend(), result.shape().cbegin()));
    EXPECT_TRUE(std::equal(vec.strides().cbegin(), vec.strides().cend(), result.strides().cbegin()));
    // TODO: check why this does not build on modern MSVC compilers
    // NOTE(review): _WIN32 is the compiler-predefined macro; WIN32 is only
    // defined by windows.h or some build systems, so the original guard could
    // miss MSVC builds.  Testing both macros makes the intended skip reliable.
#if !defined(_WIN32) && !defined(WIN32)
    EXPECT_TRUE(std::equal(vec.backstrides().cbegin(), vec.backstrides().cend(), result.backstrides().cbegin()));
#endif
    EXPECT_EQ(vec.size(), result.size());
    if (compare_layout)
    {
        EXPECT_EQ(vec.layout(), result.layout());
    }
}
template <class V, class C = std::vector<std::size_t>>
void test_resize(V& vec)
{
{
SCOPED_TRACE("row_major resize");
row_major_result<C> rm;
vec.resize(rm.m_shape, layout_type::row_major);
compare_shape(vec, rm);
}
{
SCOPED_TRACE("different types resize");
row_major_result<C> rm;
auto v_copy_a = vec;
auto v_copy_b = vec;
std::array<std::size_t, 3> ar = {3, 2, 4};
std::vector<std::size_t> vr = {3, 2, 4};
v_copy_a.resize(ar);
compare_shape(v_copy_a, rm);
v_copy_b.resize(vr);
compare_shape(v_copy_b, rm);
}
{
SCOPED_TRACE("column_major resize");
column_major_result<C> cm;
vec.resize(cm.m_shape, layout_type::column_major);
compare_shape(vec, cm);
}
{
SCOPED_TRACE("central_major resize");
central_major_result<C> cem;
vec.resize(cem.m_shape, cem.m_strides);
compare_shape(vec, cem);
}
{
SCOPED_TRACE("unit_shape resize");
unit_shape_result<C> usr;
vec.resize(usr.m_shape, layout_type::row_major);
compare_shape(vec, usr, false);
EXPECT_EQ(vec.layout(), layout_type::row_major);
}
}
template <class V, class C = std::vector<std::size_t>>
void test_transpose(V& vec)
{
using shape_type = typename V::shape_type;
using strides_type = typename V::strides_type;
{
SCOPED_TRACE("transpose");
shape_type shape_new = xtl::make_sequence<shape_type>(vec.dimension(), 0);
std::copy(vec.shape().cbegin(), vec.shape().cend(), shape_new.begin());
auto vt = transpose(vec);
std::reverse(shape_new.begin(), shape_new.end());
EXPECT_EQ(vt.shape(), shape_new);
EXPECT_TRUE(std::equal(vt.shape().cbegin(), vt.shape().cend(), shape_new.cbegin()));
}
{
SCOPED_TRACE("transpose with data");
row_major_result<C> rm;
vec.resize(rm.shape(), layout_type::row_major);
assign_array(vec, rm.m_assigner);
EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));
auto vec_copy = vec;
shape_type shape_new(rm.shape());
auto vt = transpose(vec);
std::reverse(shape_new.begin(), shape_new.end());
EXPECT_EQ(vt.shape(), shape_new);
EXPECT_TRUE(std::equal(vt.storage().cbegin(), vt.storage().cend(), rm.m_data.cbegin()));
strides_type new_strides = {rm.m_strides[2],
rm.m_strides[1],
rm.m_strides[0]};
EXPECT_EQ(vt.strides(), new_strides);
strides_type new_backstrides = {rm.m_backstrides[2],
rm.m_backstrides[1],
rm.m_backstrides[0]};
EXPECT_EQ(vt.backstrides(), new_backstrides);
EXPECT_EQ(vec_copy(0, 0, 0), vt(0, 0, 0));
EXPECT_EQ(vec_copy(0, 1, 0), vt(0, 1, 0));
EXPECT_EQ(vec_copy(1, 1, 0), vt(0, 1, 1));
EXPECT_EQ(vec_copy(1, 1, 2), vt(2, 1, 1));
}
{
SCOPED_TRACE("transpose with permutation");
row_major_result<C> rm;
vec.resize(rm.shape(), layout_type::row_major);
assign_array(vec, rm.m_assigner);
EXPECT_TRUE(std::equal(vec.storage().cbegin(), vec.storage().cend(), rm.m_data.cbegin()));
auto vec_copy = vec;
shape_type a = xtl::make_sequence<shape_type>(vec.dimension(), 0);
std::copy(vec.shape().cbegin(), vec.shape().cend(), a.begin());
auto vt = transpose(vec, {1, 0, 2});
shape_type shape_new = {a[1], a[0], a[2]};
EXPECT_TRUE(std::equal(vt.shape().cbegin(), vt.shape().cend(), shape_new.begin()));
EXPECT_TRUE(std::equal(vt.storage().cbegin(), vt.storage().cend(), rm.m_data.cbegin()));
strides_type new_strides = {rm.m_strides[1],
rm.m_strides[0],
rm.m_strides[2]};
EXPECT_EQ(vt.strides(), new_strides);
// strides_type new_backstrides = {rm.m_backstrides[1],
// rm.m_backstrides[0],
// rm.m_backstrides[2]};
// EXPECT_EQ(vt.backstrides(), new_backstrides);
EXPECT_EQ(vec_copy(0, 0, 0), vt(0, 0, 0));
EXPECT_EQ(vec_copy(0, 1, 0), vt(1, 0, 0));
EXPECT_EQ(vec_copy(1, 1, 0), vt(1, 1
gitextract_lx7y446y/ ├── .github/ │ └── workflows/ │ ├── linux.yml │ ├── osx.yml │ └── windows.yml ├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.md ├── benchmark/ │ ├── CMakeLists.txt │ ├── benchmark_pyarray.py │ ├── benchmark_pybind_array.py │ ├── benchmark_pybind_vectorize.py │ ├── benchmark_pytensor.py │ ├── benchmark_pyvectorize.py │ ├── main.cpp │ └── setup.py ├── cmake/ │ └── FindNumPy.cmake ├── docs/ │ ├── Doxyfile │ ├── Makefile │ ├── environment.yml │ ├── make.bat │ └── source/ │ ├── _static/ │ │ └── main_stylesheet.css │ ├── api_reference.rst │ ├── array_tensor.rst │ ├── basic_usage.rst │ ├── compilers.rst │ ├── conf.py │ ├── cookiecutter.rst │ ├── dev_build_options.rst │ ├── examples/ │ │ ├── copy_cast/ │ │ │ ├── CMakeLists.txt │ │ │ ├── example.py │ │ │ └── main.cpp │ │ ├── readme_example_1/ │ │ │ ├── CMakeLists.txt │ │ │ ├── example.py │ │ │ └── main.cpp │ │ └── sfinae/ │ │ ├── CMakeLists.txt │ │ ├── example.py │ │ ├── main.cpp │ │ ├── mymodule.hpp │ │ └── python.cpp │ ├── examples.rst │ ├── index.rst │ ├── installation.rst │ ├── numpy_capi.rst │ ├── pyarray.rst │ ├── pytensor.rst │ ├── pyvectorize.rst │ └── releasing.rst ├── environment-dev.yml ├── include/ │ └── xtensor-python/ │ ├── pyarray.hpp │ ├── pyarray_backstrides.hpp │ ├── pycontainer.hpp │ ├── pynative_casters.hpp │ ├── pystrides_adaptor.hpp │ ├── pytensor.hpp │ ├── pyvectorize.hpp │ ├── xtensor_python_config.hpp │ └── xtensor_type_caster_base.hpp ├── readthedocs.yml ├── test/ │ ├── CMakeLists.txt │ ├── copyGTest.cmake.in │ ├── downloadGTest.cmake.in │ ├── main.cpp │ ├── test_common.hpp │ ├── test_pyarray.cpp │ ├── test_pyarray_traits.cpp │ ├── test_pytensor.cpp │ ├── test_pyvectorize.cpp │ └── test_sfinae.cpp ├── test_python/ │ ├── main.cpp │ ├── setup.py │ └── test_pyarray.py ├── xtensor-python.pc.in └── xtensor-pythonConfig.cmake.in
SYMBOL INDEX (360 symbols across 26 files)
FILE: benchmark/main.cpp
function PYBIND11_MODULE (line 15) | PYBIND11_MODULE(benchmark_xtensor_python, m)
FILE: benchmark/setup.py
class get_pybind_include (line 10) | class get_pybind_include(object):
method __init__ (line 17) | def __init__(self, user=False):
method __str__ (line 20) | def __str__(self):
class get_numpy_include (line 24) | class get_numpy_include(object):
method __str__ (line 31) | def __str__(self):
function has_flag (line 53) | def has_flag(compiler, flagname):
function cpp_flag (line 67) | def cpp_flag(compiler):
class BuildExt (line 77) | class BuildExt(build_ext):
method build_extensions (line 87) | def build_extensions(self):
FILE: docs/source/conf.py
function setup (line 18) | def setup(app):
FILE: docs/source/examples/copy_cast/main.cpp
function sum_of_sines (line 8) | double sum_of_sines(T& m)
function sum_of_cosines (line 15) | double sum_of_cosines(const xt::xarray<double>& m)
function PYBIND11_MODULE (line 21) | PYBIND11_MODULE(mymodule, m)
FILE: docs/source/examples/readme_example_1/main.cpp
function sum_of_sines (line 7) | double sum_of_sines(xt::pyarray<double>& m)
function PYBIND11_MODULE (line 13) | PYBIND11_MODULE(mymodule, m)
FILE: docs/source/examples/sfinae/main.cpp
function main (line 4) | int main()
FILE: docs/source/examples/sfinae/mymodule.hpp
type mymodule (line 3) | namespace mymodule {
type is_std_vector (line 6) | struct is_std_vector
type is_std_vector<std::vector<T> > (line 12) | struct is_std_vector<std::vector<T> >
function times_dimension (line 19) | void times_dimension(T& t)
function times_dimension (line 27) | void times_dimension(T& t)
FILE: docs/source/examples/sfinae/python.cpp
function PYBIND11_MODULE (line 6) | PYBIND11_MODULE(mymodule, m)
FILE: include/xtensor-python/pyarray.hpp
type xt (line 28) | namespace xt
class pyarray (line 31) | class pyarray
method pyarray (line 183) | pyarray(self_type&&) = default;
method self_type (line 184) | self_type& operator=(self_type&& e) = default;
type xiterable_inner_types<pyarray<T, L>> (line 102) | struct xiterable_inner_types<pyarray<T, L>>
type xcontainer_inner_types<pyarray<T, L>> (line 108) | struct xcontainer_inner_types<pyarray<T, L>>
class pyarray (line 137) | class pyarray : public pycontainer<pyarray<T, L>>,
method pyarray (line 183) | pyarray(self_type&&) = default;
method self_type (line 184) | self_type& operator=(self_type&& e) = default;
function layout_type (line 571) | layout_type pyarray<T, L>::default_dynamic_layout()
type pybind11 (line 34) | namespace pybind11
type detail (line 36) | namespace detail
type handle_type_name<xt::pyarray<T, L>> (line 40) | struct handle_type_name<xt::pyarray<T, L>>
method PYBIND11_DESCR (line 42) | static PYBIND11_DESCR name()
type pyobject_caster<xt::pyarray<T, L>> (line 50) | struct pyobject_caster<xt::pyarray<T, L>>
method load (line 54) | bool load(handle src, bool convert)
method handle (line 67) | static handle cast(const handle& src, return_value_policy, handle)
type type_caster<xt::xexpression<xt::pyarray<T, L>>> (line 81) | struct type_caster<xt::xexpression<xt::pyarray<T, L>>> : pyobject_ca...
type xt (line 99) | namespace xt
class pyarray (line 31) | class pyarray
method pyarray (line 183) | pyarray(self_type&&) = default;
method self_type (line 184) | self_type& operator=(self_type&& e) = default;
type xiterable_inner_types<pyarray<T, L>> (line 102) | struct xiterable_inner_types<pyarray<T, L>>
type xcontainer_inner_types<pyarray<T, L>> (line 108) | struct xcontainer_inner_types<pyarray<T, L>>
class pyarray (line 137) | class pyarray : public pycontainer<pyarray<T, L>>,
method pyarray (line 183) | pyarray(self_type&&) = default;
method self_type (line 184) | self_type& operator=(self_type&& e) = default;
function layout_type (line 571) | layout_type pyarray<T, L>::default_dynamic_layout()
FILE: include/xtensor-python/pyarray_backstrides.hpp
type xt (line 16) | namespace xt
class pybackstrides_iterator (line 24) | class pybackstrides_iterator
class pyarray_backstrides (line 93) | class pyarray_backstrides
method pyarray_backstrides (line 112) | pyarray_backstrides() = default;
FILE: include/xtensor-python/pycontainer.hpp
type xt (line 39) | namespace xt
class pycontainer (line 55) | class pycontainer : public pybind11::object,
method pycontainer (line 114) | pycontainer(const pycontainer&) = default;
method pycontainer (line 115) | pycontainer& operator=(const pycontainer&) = default;
method pycontainer (line 117) | pycontainer(pycontainer&&) = default;
method pycontainer (line 118) | pycontainer& operator=(pycontainer&&) = default;
type detail (line 148) | namespace detail
type numpy_traits (line 151) | struct numpy_traits
type numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value>> (line 154) | struct numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_...
type numpy_enum_adjuster (line 182) | struct numpy_enum_adjuster
method pyarray_type (line 184) | static inline int pyarray_type(PyArrayObject* obj)
type numpy_enum_adjuster<true> (line 191) | struct numpy_enum_adjuster<true>
method pyarray_type (line 193) | static inline int pyarray_type(PyArrayObject* obj)
function pyarray_type (line 204) | inline int pyarray_type(PyArrayObject* obj)
function default_initialize_impl (line 210) | void default_initialize_impl(T& /*storage*/, std::false_type)
function default_initialize_impl (line 215) | void default_initialize_impl(T& storage, std::true_type)
function default_initialize (line 222) | void default_initialize(T& storage)
function check_array_type (line 229) | bool check_array_type(const pybind11::handle& src, std::true_type)
function check_array_type (line 236) | bool check_array_type(const pybind11::handle& src, std::false_type)
function check_array (line 243) | bool check_array(const pybind11::handle& src)
type check_dims (line 346) | struct check_dims
method run (line 348) | static bool run(std::size_t)
type check_dims<std::array<T, N>> (line 355) | struct check_dims<std::array<T, N>>
method run (line 357) | static bool run(std::size_t new_dim)
function PyObject (line 300) | inline PyObject* pycontainer<D>::raw_array_t(PyObject* ptr)
function PyArrayObject (line 314) | inline PyArrayObject* pycontainer<D>::python_array() const
type detail (line 343) | namespace detail
type numpy_traits (line 151) | struct numpy_traits
type numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_any_of<T, std::is_arithmetic, xtl::is_complex>::value>> (line 154) | struct numpy_traits<T, std::enable_if_t<pybind11::detail::satisfies_...
type numpy_enum_adjuster (line 182) | struct numpy_enum_adjuster
method pyarray_type (line 184) | static inline int pyarray_type(PyArrayObject* obj)
type numpy_enum_adjuster<true> (line 191) | struct numpy_enum_adjuster<true>
method pyarray_type (line 193) | static inline int pyarray_type(PyArrayObject* obj)
function pyarray_type (line 204) | inline int pyarray_type(PyArrayObject* obj)
function default_initialize_impl (line 210) | void default_initialize_impl(T& /*storage*/, std::false_type)
function default_initialize_impl (line 215) | void default_initialize_impl(T& storage, std::true_type)
function default_initialize (line 222) | void default_initialize(T& storage)
function check_array_type (line 229) | bool check_array_type(const pybind11::handle& src, std::true_type)
function check_array_type (line 236) | bool check_array_type(const pybind11::handle& src, std::false_type)
function check_array (line 243) | bool check_array(const pybind11::handle& src)
type check_dims (line 346) | struct check_dims
method run (line 348) | static bool run(std::size_t)
type check_dims<std::array<T, N>> (line 355) | struct check_dims<std::array<T, N>>
method run (line 357) | static bool run(std::size_t new_dim)
function layout_type (line 453) | inline layout_type pycontainer<D>::layout() const
function import_numpy (line 497) | inline void import_numpy()
type workaround (line 509) | namespace workaround
function long_long_allocator (line 512) | inline void long_long_allocator()
FILE: include/xtensor-python/pynative_casters.hpp
type pybind11 (line 15) | namespace pybind11
type detail (line 17) | namespace detail
type type_caster<xt::xarray<T, L>> (line 21) | struct type_caster<xt::xarray<T, L>> : xtensor_type_caster_base<xt::...
type type_caster<xt::xtensor<T, N, L>> (line 27) | struct type_caster<xt::xtensor<T, N, L>> : xtensor_type_caster_base<...
type type_caster<xt::xtensor_fixed<T, FSH, L>> (line 33) | struct type_caster<xt::xtensor_fixed<T, FSH, L>> : xtensor_type_cast...
type type_caster<xt::xstrided_view<CT, S, L, FST>> (line 39) | struct type_caster<xt::xstrided_view<CT, S, L, FST>> : xtensor_type_...
type type_caster<xt::xarray_adaptor<EC, L, SC, Tag>> (line 45) | struct type_caster<xt::xarray_adaptor<EC, L, SC, Tag>> : xtensor_typ...
type type_caster<xt::xtensor_adaptor<EC, N, L, Tag>> (line 51) | struct type_caster<xt::xtensor_adaptor<EC, N, L, Tag>> : xtensor_typ...
FILE: include/xtensor-python/pystrides_adaptor.hpp
type xt (line 16) | namespace xt
class pystrides_iterator (line 20) | class pystrides_iterator
method pystrides_iterator (line 92) | pystrides_iterator() = default;
method pystrides_iterator (line 94) | inline pystrides_iterator(pointer current, shape_pointer shape)
method reference (line 100) | inline reference operator*() const
method pointer (line 105) | inline pointer operator->() const
method reference (line 112) | inline reference operator[](difference_type n) const
method self_type (line 117) | inline self_type& operator++()
method self_type (line 124) | inline self_type& operator--()
method self_type (line 131) | inline self_type operator++(int)
method self_type (line 139) | inline self_type operator--(int)
method self_type (line 147) | inline self_type& operator+=(difference_type n)
method self_type (line 154) | inline self_type& operator-=(difference_type n)
method self_type (line 161) | inline self_type operator+(difference_type n) const
method self_type (line 166) | inline self_type operator-(difference_type n) const
method difference_type (line 171) | inline difference_type operator-(const self_type& rhs) const
method pointer (line 177) | pointer get_pointer() const { return p_current; }
class pystrides_adaptor (line 27) | class pystrides_adaptor
method pystrides_adaptor (line 46) | pystrides_adaptor() = default;
class pystrides_iterator (line 79) | class pystrides_iterator
method pystrides_iterator (line 92) | pystrides_iterator() = default;
method pystrides_iterator (line 94) | inline pystrides_iterator(pointer current, shape_pointer shape)
method reference (line 100) | inline reference operator*() const
method pointer (line 105) | inline pointer operator->() const
method reference (line 112) | inline reference operator[](difference_type n) const
method self_type (line 117) | inline self_type& operator++()
method self_type (line 124) | inline self_type& operator--()
method self_type (line 131) | inline self_type operator++(int)
method self_type (line 139) | inline self_type operator--(int)
method self_type (line 147) | inline self_type& operator+=(difference_type n)
method self_type (line 154) | inline self_type& operator-=(difference_type n)
method self_type (line 161) | inline self_type operator+(difference_type n) const
method self_type (line 166) | inline self_type operator-(difference_type n) const
method difference_type (line 171) | inline difference_type operator-(const self_type& rhs) const
method pointer (line 177) | pointer get_pointer() const { return p_current; }
FILE: include/xtensor-python/pytensor.hpp
type xt (line 28) | namespace xt
class pytensor (line 31) | class pytensor
method pytensor (line 202) | pytensor(self_type&&) = default;
method self_type (line 203) | self_type& operator=(self_type&& e) = default;
type detail (line 109) | namespace detail {
type numpy_strides (line 112) | struct numpy_strides
type numpy_strides<0> (line 118) | struct numpy_strides<0>
type xiterable_inner_types<pytensor<T, N, L>> (line 126) | struct xiterable_inner_types<pytensor<T, N, L>>
type xcontainer_inner_types<pytensor<T, N, L>> (line 132) | struct xcontainer_inner_types<pytensor<T, N, L>>
class pytensor (line 163) | class pytensor : public pycontainer<pytensor<T, N, L>>,
method pytensor (line 202) | pytensor(self_type&&) = default;
method self_type (line 203) | self_type& operator=(self_type&& e) = default;
type pybind11 (line 34) | namespace pybind11
type detail (line 36) | namespace detail
type handle_type_name<xt::pytensor<T, N, L>> (line 40) | struct handle_type_name<xt::pytensor<T, N, L>>
method PYBIND11_DESCR (line 42) | static PYBIND11_DESCR name()
type pyobject_caster<xt::pytensor<T, N, L>> (line 50) | struct pyobject_caster<xt::pytensor<T, N, L>>
method load (line 54) | bool load(handle src, bool convert)
method handle (line 75) | static handle cast(const handle& src, return_value_policy, handle)
type type_caster<xt::xexpression<xt::pytensor<T, N, L>>> (line 89) | struct type_caster<xt::xexpression<xt::pytensor<T, N, L>>> : pyobjec...
type xt (line 107) | namespace xt
class pytensor (line 31) | class pytensor
method pytensor (line 202) | pytensor(self_type&&) = default;
method self_type (line 203) | self_type& operator=(self_type&& e) = default;
type detail (line 109) | namespace detail {
type numpy_strides (line 112) | struct numpy_strides
type numpy_strides<0> (line 118) | struct numpy_strides<0>
type xiterable_inner_types<pytensor<T, N, L>> (line 126) | struct xiterable_inner_types<pytensor<T, N, L>>
type xcontainer_inner_types<pytensor<T, N, L>> (line 132) | struct xcontainer_inner_types<pytensor<T, N, L>>
class pytensor (line 163) | class pytensor : public pycontainer<pytensor<T, N, L>>,
method pytensor (line 202) | pytensor(self_type&&) = default;
method self_type (line 203) | self_type& operator=(self_type&& e) = default;
FILE: include/xtensor-python/pyvectorize.hpp
type xt (line 18) | namespace xt
type pyvectorizer (line 22) | struct pyvectorizer
method pyvectorizer (line 27) | pyvectorizer(F&& func)
function pyvectorize (line 43) | inline pyvectorizer<R (*)(Args...), R, Args...> pyvectorize(R (*f)(Arg...
function pyvectorize (line 50) | inline pyvectorizer<F, R, Args...> pyvectorize(F&& f, R (*)(Args...))
function pyvectorize (line 56) | inline auto pyvectorize(F&& f) -> decltype(pyvectorize(std::forward<F>...
FILE: include/xtensor-python/xtensor_type_caster_base.hpp
type pybind11 (line 23) | namespace pybind11
type detail (line 25) | namespace detail
type pybind_array_getter_impl (line 28) | struct pybind_array_getter_impl
method run (line 30) | static auto run(handle src)
type pybind_array_getter_impl<T, xt::layout_type::column_major> (line 37) | struct pybind_array_getter_impl<T, xt::layout_type::column_major>
method run (line 39) | static auto run(handle src)
type pybind_array_getter (line 46) | struct pybind_array_getter
type pybind_array_getter<xt::xarray<T, L>> (line 51) | struct pybind_array_getter<xt::xarray<T, L>>
method run (line 53) | static auto run(handle src)
type pybind_array_getter<xt::xtensor<T, N, L>> (line 60) | struct pybind_array_getter<xt::xtensor<T, N, L>>
method run (line 62) | static auto run(handle src)
type pybind_array_getter<xt::xtensor_fixed<T, FSH, L>> (line 69) | struct pybind_array_getter<xt::xtensor_fixed<T, FSH, L>>
method run (line 71) | static auto run(handle src)
type pybind_array_getter<xt::xstrided_view<CT, S, L, FST>> (line 78) | struct pybind_array_getter<xt::xstrided_view<CT, S, L, FST>>
method run (line 80) | static auto run(handle /*src*/)
type pybind_array_getter<xt::xarray_adaptor<EC, L, SC, Tag>> (line 87) | struct pybind_array_getter<xt::xarray_adaptor<EC, L, SC, Tag>>
method run (line 89) | static auto run(handle src)
type pybind_array_getter<xt::xtensor_adaptor<EC, N, L, Tag>> (line 97) | struct pybind_array_getter<xt::xtensor_adaptor<EC, N, L, Tag>>
method run (line 99) | static auto run(handle /*src*/)
type pybind_array_dim_checker (line 107) | struct pybind_array_dim_checker
method run (line 110) | static bool run(const B& /*buf*/)
type pybind_array_dim_checker<xt::xtensor<T, N, L>> (line 117) | struct pybind_array_dim_checker<xt::xtensor<T, N, L>>
method run (line 120) | static bool run(const B& buf)
type pybind_array_dim_checker<xt::xtensor_fixed<T, FSH, L>> (line 127) | struct pybind_array_dim_checker<xt::xtensor_fixed<T, FSH, L>>
method run (line 130) | static bool run(const B& buf)
type pybind_array_shape_checker (line 138) | struct pybind_array_shape_checker
method run (line 141) | static bool run(const B& /*buf*/)
type pybind_array_shape_checker<xt::xtensor_fixed<T, FSH, L>> (line 148) | struct pybind_array_shape_checker<xt::xtensor_fixed<T, FSH, L>>
method run (line 151) | static bool run(const B& buf)
function handle (line 162) | handle xtensor_array_cast(const Type& src, handle base = handle(), b...
function handle (line 189) | handle xtensor_ref_array(CType& src, handle parent = none())
function handle (line 199) | handle xtensor_encapsulate(CType* src)
type xtensor_type_caster_base (line 207) | struct xtensor_type_caster_base
method handle (line 214) | static handle cast_impl(CType* src, return_value_policy policy, ha...
method load (line 239) | bool load(handle src, bool convert)
method handle (line 273) | static handle cast(Type&& src, return_value_policy /* policy */, h...
method handle (line 279) | static handle cast(const Type&& src, return_value_policy /* policy...
method handle (line 285) | static handle cast(Type& src, return_value_policy policy, handle p...
method handle (line 296) | static handle cast(const Type& src, return_value_policy policy, ha...
method handle (line 307) | static handle cast(Type* src, return_value_policy policy, handle p...
method handle (line 313) | static handle cast(const Type* src, return_value_policy policy, ha...
FILE: test/main.cpp
function main (line 22) | int main(int argc, char* argv[])
FILE: test/test_common.hpp
type xt (line 18) | namespace xt
type layout_result (line 33) | struct layout_result
method layout_result (line 42) | inline layout_result()
method size_type (line 65) | inline size_type size() const { return m_data.size(); }
method shape_type (line 66) | inline const shape_type& shape() const { return m_shape; }
method strides_type (line 67) | inline const strides_type& strides() const { return m_strides; }
method strides_type (line 68) | inline const strides_type& backstrides() const { return m_backstride...
method layout_type (line 69) | inline layout_type layout() const { return m_layout; }
method vector_type (line 70) | inline const vector_type& data() const { return m_data; }
type row_major_result (line 74) | struct row_major_result : layout_result<C>
method row_major_result (line 76) | inline row_major_result()
type column_major_result (line 88) | struct column_major_result : layout_result<C>
method column_major_result (line 90) | inline column_major_result()
type central_major_result (line 103) | struct central_major_result : layout_result<C>
method central_major_result (line 105) | inline central_major_result()
type unit_shape_result (line 117) | struct unit_shape_result
method unit_shape_result (line 126) | inline unit_shape_result()
method size_type (line 150) | inline size_type size() const { return m_data.size(); }
method shape_type (line 151) | inline const shape_type& shape() const { return m_shape; }
method strides_type (line 152) | inline const strides_type& strides() const { return m_strides; }
method strides_type (line 153) | inline const strides_type& backstrides() const { return m_backstride...
method layout_type (line 154) | inline layout_type layout() const { return m_layout; }
method vector_type (line 155) | inline const vector_type& data() const { return m_data; }
function compare_shape (line 159) | void compare_shape(V& vec, const R& result, bool compare_layout = true)
function test_resize (line 175) | void test_resize(V& vec)
function test_transpose (line 221) | void test_transpose(V& vec)
function assign_array (line 317) | void assign_array(V1& dst, const V2& src)
function test_bound_check (line 332) | void test_bound_check(V& vec)
function test_access (line 342) | void test_access(V& vec)
function test_element (line 390) | void test_element(V& vec)
function indexed_assign_array (line 454) | void indexed_assign_array(V1& dst, const V2& src)
function test_indexed_access (line 473) | void test_indexed_access(V& vec)
function test_broadcast (line 521) | void test_broadcast(V& vec)
function test_broadcast2 (line 562) | void test_broadcast2(V& vec)
function test_iterator (line 580) | void test_iterator(VRM& vecrm, VCM& veccm)
function test_xiterator (line 602) | void test_xiterator(V& vec)
function test_reverse_xiterator (line 682) | void test_reverse_xiterator(V& vec)
FILE: test/test_pyarray.cpp
type xt (line 19) | namespace xt
function test1 (line 26) | void test1 (ndarray<int>const& x)
function compute (line 32) | double compute(ndarray<double> const& xs)
function TEST (line 38) | TEST(pyarray, initializer_constructor)
function TEST (line 80) | TEST(pyarray, expression)
function TEST (line 103) | TEST(pyarray, shaped_constructor)
function TEST (line 122) | TEST(pyarray, from_shape)
function TEST (line 131) | TEST(pyarray, strided_constructor)
function TEST (line 138) | TEST(pyarray, valued_constructor)
function TEST (line 161) | TEST(pyarray, strided_valued_constructor)
function TEST (line 171) | TEST(pyarray, copy_semantic)
function TEST (line 199) | TEST(pyarray, move_semantic)
function TEST (line 225) | TEST(pyarray, extended_constructor)
function TEST (line 248) | TEST(pyarray, resize)
function TEST (line 258) | TEST(pyarray, transpose)
function TEST (line 264) | TEST(pyarray, access)
function TEST (line 270) | TEST(pyarray, indexed_access)
function TEST (line 276) | TEST(pyarray, broadcast_shape)
function TEST (line 283) | TEST(pyarray, iterator)
function TEST (line 294) | TEST(pyarray, initializer_list)
function TEST (line 304) | TEST(pyarray, zerod)
function TEST (line 310) | TEST(pyarray, reshape)
function TEST (line 324) | TEST(pyarray, view)
function TEST (line 331) | TEST(pyarray, zerod_copy)
FILE: test/test_pyarray_traits.cpp
type xt (line 16) | namespace xt
type testing (line 18) | namespace testing
class pyarray_traits (line 20) | class pyarray_traits: public ::testing::Test
method test_has_strides (line 38) | bool test_has_strides(T const&)
method test_result_layout (line 44) | xt::layout_type test_result_layout(T const& a1, T const& a2)
method test_linear_assign (line 52) | bool test_linear_assign(T const& a1, T const& a2)
method test_static_simd_linear_assign (line 61) | bool test_static_simd_linear_assign(T const& a1, T const& a2)
method test_dynamic_simd_linear_assign (line 69) | bool test_dynamic_simd_linear_assign(T const& a1, T const& a2)
method test_linear_static_layout (line 77) | bool test_linear_static_layout(T const& a1, T const& a2)
method test_contiguous_layout (line 85) | bool test_contiguous_layout(T const& a1, T const& a2)
function TEST_F (line 93) | TEST_F(pyarray_traits, result_layout)
function TEST_F (line 105) | TEST_F(pyarray_traits, has_strides)
function TEST_F (line 112) | TEST_F(pyarray_traits, has_linear_assign)
function TEST_F (line 119) | TEST_F(pyarray_traits, linear_assign)
function TEST_F (line 126) | TEST_F(pyarray_traits, static_simd_linear_assign)
function TEST_F (line 139) | TEST_F(pyarray_traits, dynamic_simd_linear_assign)
function TEST_F (line 152) | TEST_F(pyarray_traits, linear_static_layout)
function TEST_F (line 159) | TEST_F(pyarray_traits, contiguous_layout)
FILE: test/test_pytensor.cpp
type xt (line 19) | namespace xt
function TEST (line 23) | TEST(pytensor, initializer_constructor)
function TEST (line 37) | TEST(pytensor, shaped_constructor)
function TEST (line 56) | TEST(pytensor, from_shape)
function TEST (line 68) | TEST(pytensor, scalar_from_shape)
function TEST (line 77) | TEST(pytensor, strided_constructor)
function TEST (line 84) | TEST(pytensor, valued_constructor)
function TEST (line 107) | TEST(pytensor, strided_valued_constructor)
function TEST (line 117) | TEST(pytensor, copy_semantic)
function TEST (line 145) | TEST(pytensor, move_semantic)
function TEST (line 171) | TEST(pytensor, extended_constructor)
function TEST (line 182) | TEST(pytensor, resize)
function TEST (line 192) | TEST(pytensor, transpose)
function TEST (line 198) | TEST(pytensor, access)
function TEST (line 204) | TEST(pytensor, indexed_access)
function TEST (line 210) | TEST(pytensor, broadcast_shape)
function TEST (line 216) | TEST(pytensor, iterator)
function TEST (line 227) | TEST(pytensor, zerod)
function TEST (line 233) | TEST(pytensor, reshape)
function TEST (line 246) | TEST(pytensor, view)
function TEST (line 253) | TEST(pytensor, unary)
function TEST (line 263) | TEST(pytensor, inplace_pybind11_overload)
FILE: test/test_pyvectorize.cpp
type xt (line 17) | namespace xt
function f1 (line 20) | double f1(double a, double b)
function TEST (line 27) | TEST(pyvectorize, function)
function TEST (line 37) | TEST(pyvectorize, lambda)
function TEST (line 47) | TEST(pyvectorize, complex)
FILE: test/test_sfinae.cpp
type xt (line 18) | namespace xt
function sfinae_has_fixed_rank (line 21) | inline bool sfinae_has_fixed_rank(E&&)
function sfinae_has_fixed_rank (line 27) | inline bool sfinae_has_fixed_rank(E&&)
function TEST (line 32) | TEST(sfinae, fixed_rank)
function TEST (line 43) | TEST(sfinae, get_rank)
FILE: test_python/main.cpp
function example1 (line 27) | double example1(xt::pyarray<double>& m)
function example2 (line 32) | xt::pyarray<double> example2(xt::pyarray<double>& m)
function example3_xarray (line 37) | xt::xarray<int> example3_xarray(const xt::xarray<int>& m)
function example3_xarray_colmajor (line 42) | xt::xarray<int, xt::layout_type::column_major> example3_xarray_colmajor(
function example3_xtensor3 (line 48) | xt::xtensor<int, 3> example3_xtensor3(const xt::xtensor<int, 3>& m)
function example3_xtensor2 (line 53) | xt::xtensor<int, 2> example3_xtensor2(const xt::xtensor<int, 2>& m)
function example3_xtensor2_colmajor (line 58) | xt::xtensor<int, 2, xt::layout_type::column_major> example3_xtensor2_col...
function example3_xfixed3 (line 64) | xt::xtensor_fixed<int, xt::xshape<4, 3, 2>> example3_xfixed3(const xt::x...
function example3_xfixed2 (line 69) | xt::xtensor_fixed<int, xt::xshape<3, 2>> example3_xfixed2(const xt::xten...
function example3_xfixed2_colmajor (line 74) | xt::xtensor_fixed<int, xt::xshape<3, 2>, xt::layout_type::column_major> ...
function readme_example1 (line 82) | double readme_example1(xt::pyarray<double>& m)
function readme_example2 (line 88) | double readme_example2(double i, double j)
function complex_overload (line 93) | auto complex_overload(const xt::pyarray<std::complex<double>>& a)
function no_complex_overload (line 97) | auto no_complex_overload(const xt::pyarray<double>& a)
function complex_overload_reg (line 102) | auto complex_overload_reg(const std::complex<double>& a)
function no_complex_overload_reg (line 107) | auto no_complex_overload_reg(const double& a)
function array_addition (line 114) | xt::pyarray<double> array_addition(const xt::pyarray<double>& m, const x...
function array_subtraction (line 119) | xt::pyarray<double> array_subtraction(xt::pyarray<double>& m, xt::pyarra...
function array_multiplication (line 124) | xt::pyarray<double> array_multiplication(xt::pyarray<double>& m, xt::pya...
function array_division (line 129) | xt::pyarray<double> array_division(xt::pyarray<double>& m, xt::pyarray<d...
function add (line 136) | int add(int i, int j)
function typestring (line 141) | std::string typestring() { return "Unknown"; }
function int_overload (line 152) | inline std::string int_overload(xt::pyarray<T>& m)
function dump_numpy_constant (line 157) | void dump_numpy_constant()
type A (line 180) | struct A
type B (line 188) | struct B
class C (line 194) | class C
method C (line 198) | C() : m_array{0, 0, 0, 0} {}
method array_type (line 199) | array_type & array() { return m_array; }
type test_native_casters (line 204) | struct test_native_casters
method get_strided_view (line 214) | auto get_strided_view()
method get_array_adapter (line 219) | auto get_array_adapter()
method get_tensor_adapter (line 227) | auto get_tensor_adapter()
method get_owning_array_adapter (line 235) | auto get_owning_array_adapter()
function dtype_to_python (line 247) | xt::pyarray<A> dtype_to_python()
function dtype_from_python (line 255) | xt::pyarray<B> dtype_from_python(xt::pyarray<B>& b)
function char_array (line 267) | void char_array(xt::pyarray<char[20]>& carr)
function row_major_tensor (line 280) | void row_major_tensor(xt::pytensor<double, 3, xt::layout_type::row_major...
function col_major_array (line 288) | void col_major_array(xt::pyarray<double, xt::layout_type::column_major>&...
function xscalar (line 296) | xt::pytensor<int, 0> xscalar(const xt::pytensor<int, 1>& arg)
function test_rm (line 304) | void test_rm(ndarray<int>const& x)
function PYBIND11_MODULE (line 310) | PYBIND11_MODULE(xtensor_python_test, m)
FILE: test_python/setup.py
class get_pybind_include (line 19) | class get_pybind_include(object):
method __init__ (line 26) | def __init__(self, user=False):
method __str__ (line 29) | def __str__(self):
class get_numpy_include (line 33) | class get_numpy_include(object):
method __str__ (line 40) | def __str__(self):
function has_flag (line 63) | def has_flag(compiler, flagname):
function cpp_flag (line 77) | def cpp_flag(compiler):
class BuildExt (line 87) | class BuildExt(build_ext):
method build_extensions (line 97) | def build_extensions(self):
FILE: test_python/test_pyarray.py
class XtensorTest (line 25) | class XtensorTest(TestCase):
method test_rm (line 26) | def test_rm(self):
method test_example1 (line 29) | def test_example1(self):
method test_example2 (line 32) | def test_example2(self):
method test_example3 (line 38) | def test_example3(self):
method test_broadcast_addition (line 64) | def test_broadcast_addition(self):
method test_broadcast_subtraction (line 74) | def test_broadcast_subtraction(self):
method test_broadcast_multiplication (line 85) | def test_broadcast_multiplication(self):
method test_broadcast_division (line 96) | def test_broadcast_division(self):
method test_vectorize (line 107) | def test_vectorize(self):
method test_readme_example1 (line 114) | def test_readme_example1(self):
method test_complex_overload_reg (line 119) | def test_complex_overload_reg(self):
method test_complex_overload (line 125) | def test_complex_overload(self):
method test_readme_example2 (line 136) | def test_readme_example2(self):
method test_rect_to_polar (line 145) | def test_rect_to_polar(self):
method test_shape_comparison (line 150) | def test_shape_comparison(self):
method test_int_overload (line 157) | def test_int_overload(self):
method test_dtype (line 162) | def test_dtype(self):
method test_char_array (line 193) | def test_char_array(self):
method test_col_row_major (line 201) | def test_col_row_major(self):
method test_xscalar (line 222) | def test_xscalar(self):
method test_bad_argument_call (line 226) | def test_bad_argument_call(self):
method test_diff_shape_overload (line 233) | def test_diff_shape_overload(self):
method test_native_casters (line 241) | def test_native_casters(self):
class AttributeTest (line 308) | class AttributeTest(TestCase):
method setUp (line 310) | def setUp(self):
method test_copy (line 313) | def test_copy(self):
method test_reference (line 318) | def test_reference(self):
Condensed preview — 73 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (278K chars).
[
{
"path": ".github/workflows/linux.yml",
"chars": 3446,
"preview": "name: Linux\non:\n workflow_dispatch:\n pull_request:\n push:\n branches: [master]\nconcurrency:\n group: ${{ github.wor"
},
{
"path": ".github/workflows/osx.yml",
"chars": 1945,
"preview": "name: OSX\non:\n workflow_dispatch:\n pull_request:\n push:\n branches: [master]\nconcurrency:\n group: ${{ github.workf"
},
{
"path": ".github/workflows/windows.yml",
"chars": 1143,
"preview": "name: Windows\non:\n workflow_dispatch:\n pull_request:\n push:\n branches: [master]\nconcurrency:\n group: ${{ github.w"
},
{
"path": ".gitignore",
"chars": 606,
"preview": "# Prerequisites\n*.d\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.obj\n\n# Precompiled Headers\n*.gch\n*.pch\n\n# Compiled Dynamic"
},
{
"path": "CMakeLists.txt",
"chars": 6613,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "LICENSE",
"chars": 1558,
"preview": "Copyright (c) 2016, Wolf Vollprecht, Johan Mabille and Sylvain Corlay\nCopyright (c) 2016, QuantStack\nAll rights reserved"
},
{
"path": "README.md",
"chars": 7400,
"preview": "# \n\n[ 2016, Johan Mabille and Syl"
},
{
"path": "benchmark/benchmark_pyarray.py",
"chars": 215,
"preview": "from benchmark_xtensor_python import sum_array\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\nfrom timeit import "
},
{
"path": "benchmark/benchmark_pybind_array.py",
"chars": 236,
"preview": "from benchmark_xtensor_python import pybind_sum_array\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\nfrom timeit "
},
{
"path": "benchmark/benchmark_pybind_vectorize.py",
"chars": 247,
"preview": "from benchmark_xtensor_python import pybind_rect_to_polar\nimport numpy as np\n\nfrom timeit import timeit\nw = np.ones(1000"
},
{
"path": "benchmark/benchmark_pytensor.py",
"chars": 240,
"preview": "from benchmark_xtensor_python import sum_tensor\nimport numpy as np\n\nu = np.ones(1000000, dtype=float)\n#print(sum_tensor("
},
{
"path": "benchmark/benchmark_pyvectorize.py",
"chars": 226,
"preview": "from benchmark_xtensor_python import rect_to_polar\nimport numpy as np\n\nfrom timeit import timeit\nw = np.ones(100000, dty"
},
{
"path": "benchmark/main.cpp",
"chars": 1646,
"preview": "#include \"pybind11/pybind11.h\"\n#include \"pybind11/numpy.h\"\n#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n#include \"n"
},
{
"path": "benchmark/setup.py",
"chars": 3387,
"preview": "from setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport sys\nimport os\nimport s"
},
{
"path": "cmake/FindNumPy.cmake",
"chars": 3678,
"preview": "# - Find the NumPy libraries\n# This module finds if NumPy is installed, and sets the following variables\n# indicating wh"
},
{
"path": "docs/Doxyfile",
"chars": 303,
"preview": "PROJECT_NAME = \"xtensor-python\"\nXML_OUTPUT = xml\nINPUT = ../include\nGENERATE_LATEX = NO\nGENER"
},
{
"path": "docs/Makefile",
"chars": 6626,
"preview": "# You can set these variables from the command line.\nSPHINXOPTS =\nSPHINXBUILD = sphinx-build\nPAPER =\nBUILDD"
},
{
"path": "docs/environment.yml",
"chars": 101,
"preview": "name: xtensor-python-docs\n\nchannels:\n - conda-forge\n\ndependencies:\n - breathe\n - sphinx_rtd_theme\n"
},
{
"path": "docs/make.bat",
"chars": 7285,
"preview": "@ECHO OFF\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\n"
},
{
"path": "docs/source/_static/main_stylesheet.css",
"chars": 60,
"preview": ".wy-nav-content{\n max-width: 1000px;\n margin: auto;\n}\n"
},
{
"path": "docs/source/api_reference.rst",
"chars": 399,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/array_tensor.rst",
"chars": 1219,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/basic_usage.rst",
"chars": 2532,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/compilers.rst",
"chars": 806,
"preview": ".. Copyright (c) 2016, Johan Mabille, Sylvain Corlay and Wolf Vollprecht\n\n Distributed under the terms of the BSD 3-Cl"
},
{
"path": "docs/source/conf.py",
"chars": 879,
"preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\non_rtd = os.environ.get('READTHEDOCS', None"
},
{
"path": "docs/source/cookiecutter.rst",
"chars": 2091,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/dev_build_options.rst",
"chars": 1193,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/examples/copy_cast/CMakeLists.txt",
"chars": 402,
"preview": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(pybind11 CONFIG REQUIRED)\nfind_package(xtensor REQ"
},
{
"path": "docs/source/examples/copy_cast/example.py",
"chars": 201,
"preview": "import mymodule\nimport numpy as np\n\nc = np.array([[1, 2, 3], [4, 5, 6]])\nassert np.isclose(np.sum(np.sin(c)), mymodule.s"
},
{
"path": "docs/source/examples/copy_cast/main.cpp",
"chars": 874,
"preview": "#include <numeric>\n#include <xtensor.hpp>\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-py"
},
{
"path": "docs/source/examples/readme_example_1/CMakeLists.txt",
"chars": 426,
"preview": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(Python REQUIRED COMPONENTS Interpreter Development"
},
{
"path": "docs/source/examples/readme_example_1/example.py",
"chars": 124,
"preview": "import mymodule\nimport numpy as np\n\na = np.array([1, 2, 3])\nassert np.isclose(np.sum(np.sin(a)), mymodule.sum_of_sines(a"
},
{
"path": "docs/source/examples/readme_example_1/main.cpp",
"chars": 511,
"preview": "#include <numeric>\n#include <xtensor.hpp>\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-py"
},
{
"path": "docs/source/examples/sfinae/CMakeLists.txt",
"chars": 506,
"preview": "cmake_minimum_required(VERSION 3.29)\n\nproject(mymodule)\n\nfind_package(Python REQUIRED COMPONENTS Interpreter Development"
},
{
"path": "docs/source/examples/sfinae/example.py",
"chars": 197,
"preview": "import mymodule\nimport numpy as np\n\na = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)\nb = np.array(a, copy=True)\nmy"
},
{
"path": "docs/source/examples/sfinae/main.cpp",
"chars": 225,
"preview": "#include \"mymodule.hpp\"\n#include <xtensor/io/xio.hpp>\n\nint main()\n{\n xt::xtensor<size_t, 2> a = xt::arange<size_t>(2 "
},
{
"path": "docs/source/examples/sfinae/mymodule.hpp",
"chars": 611,
"preview": "#include <xtensor/containers/xtensor.hpp>\n\nnamespace mymodule {\n\ntemplate <class T>\nstruct is_std_vector\n{\n static co"
},
{
"path": "docs/source/examples/sfinae/python.cpp",
"chars": 314,
"preview": "#include \"mymodule.hpp\"\n#include <pybind11/pybind11.h>\n#define FORCE_IMPORT_ARRAY\n#include <xtensor-python/pyarray.hpp>\n"
},
{
"path": "docs/source/examples.rst",
"chars": 6348,
"preview": "\n****************\n(CMake) Examples\n****************\n\nBasic example (from readme)\n===========================\n\nConsider t"
},
{
"path": "docs/source/index.rst",
"chars": 2951,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/installation.rst",
"chars": 1785,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/numpy_capi.rst",
"chars": 3030,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/pyarray.rst",
"chars": 283,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/pytensor.rst",
"chars": 286,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/pyvectorize.rst",
"chars": 285,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "docs/source/releasing.rst",
"chars": 1415,
"preview": ".. Copyright (c) 2016, Johan Mabille and Sylvain Corlay\n\n Distributed under the terms of the BSD 3-Clause License.\n\n "
},
{
"path": "environment-dev.yml",
"chars": 238,
"preview": "name: xtensor-python\nchannels:\n - conda-forge\ndependencies:\n # Build dependencies\n - cmake\n - ninja\n # Host depende"
},
{
"path": "include/xtensor-python/pyarray.hpp",
"chars": 20043,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pyarray_backstrides.hpp",
"chars": 10631,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pycontainer.hpp",
"chars": 16987,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pynative_casters.hpp",
"chars": 2331,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pystrides_adaptor.hpp",
"chars": 8724,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pytensor.hpp",
"chars": 19104,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/pyvectorize.hpp",
"chars": 2065,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/xtensor_python_config.hpp",
"chars": 813,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "include/xtensor-python/xtensor_type_caster_base.hpp",
"chars": 11287,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "readthedocs.yml",
"chars": 207,
"preview": "version: 2\n\nbuild:\n os: \"ubuntu-22.04\"\n tools:\n python: \"mambaforge-22.9\"\n\nsphinx:\n # Path to Sphinx configuration"
},
{
"path": "test/CMakeLists.txt",
"chars": 3493,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/copyGTest.cmake.in",
"chars": 1026,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/downloadGTest.cmake.in",
"chars": 1078,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/main.cpp",
"chars": 1146,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_common.hpp",
"chars": 26857,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_pyarray.cpp",
"chars": 9733,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_pyarray_traits.cpp",
"chars": 6246,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_pytensor.cpp",
"chars": 8559,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_pyvectorize.cpp",
"chars": 1913,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test/test_sfinae.cpp",
"chars": 1936,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test_python/main.cpp",
"chars": 12307,
"preview": "/***************************************************************************\n* Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test_python/setup.py",
"chars": 4060,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "test_python/test_pyarray.py",
"chars": 11414,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
},
{
"path": "xtensor-python.pc.in",
"chars": 243,
"preview": "prefix=@CMAKE_INSTALL_PREFIX@\nincludedir=${prefix}/include\n\nName: xtensor-python\nDescription: An extension to the xtenso"
},
{
"path": "xtensor-pythonConfig.cmake.in",
"chars": 1115,
"preview": "############################################################################\n# Copyright (c) Wolf Vollprecht, Johan Mabi"
}
]
About this extraction
This page contains the full source code of the xtensor-stack/xtensor-python GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 73 files (258.4 KB), approximately 68.1k tokens, and a symbol index with 360 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.