Repository: nmwsharp/robust-laplacians-py Branch: master Commit: 668a4ff01746 Files: 24 Total size: 105.1 KB Directory structure: gitextract_2tlpb833/ ├── .github/ │ └── workflows/ │ ├── cibuildwheel_config.toml │ ├── publish.yml │ ├── test_build.yml │ ├── test_linux.yml │ ├── test_macos.yml │ └── test_windows.yml ├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── deps/ │ └── jc_voronoi/ │ └── include/ │ └── jc_voronoi/ │ ├── jc_voronoi.h │ └── jc_voronoi_clip.h ├── misc/ │ └── conditional_deploy_build.py ├── pyproject.toml ├── src/ │ ├── cpp/ │ │ ├── .clang-format │ │ ├── core.cpp │ │ ├── point_cloud_utilities.cpp │ │ └── point_cloud_utilities.h │ └── robust_laplacian/ │ ├── __init__.py │ └── core.py └── test/ ├── robust_laplacian_test.py └── sample.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/cibuildwheel_config.toml ================================================ [tool.cibuildwheel] skip = "cp36-*" # scikit-build-core requires >=3.7 build-verbosity = 3 [tool.cibuildwheel.linux] before-all = [ "yum remove -y cmake", ] # musllinux builds on an Alpinx Linux image, no need to mess with cmake there [[tool.cibuildwheel.overrides]] select = "*-musllinux*" before-all = "" ================================================ FILE: .github/workflows/publish.yml ================================================ name: Build and Publish # NOTE: build logic is duplicated here and in test_build.yml # Run on the main branch for commits only on: push: branches: - master jobs: build_wheels: # only run if the most recent commit contains '[ci publish]' if: "contains(github.event.head_commit.message, '[ci publish]')" strategy: matrix: os: [ubuntu-latest, ubuntu-24.04-arm, windows-latest, macos-15-intel, macos-latest] name: Build wheels ${{ matrix.os }} runs-on: ${{ matrix.os }} steps: - uses: 
actions/checkout@v4 with: submodules: 'recursive' - uses: actions/setup-python@v6 with: python-version: '3.12' - name: Package source distribution # make sure this only happens on one of the runners, not repeated on all if: matrix.os == 'ubuntu-latest' run: | python -m pip install build python -m build --sdist - name: Run cibuildwheel uses: pypa/cibuildwheel@v3.3.1 with: config-file: ".github/workflows/cibuildwheel_config.toml" - name: Copy source distribution into wheelhouse if: matrix.os == 'ubuntu-latest' run: mv dist/*.tar.gz wheelhouse/ # Upload binaries to the github artifact store - name: Upload wheels uses: actions/upload-artifact@v4 with: name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} path: | ./wheelhouse/*.whl ./wheelhouse/*.tar.gz overwrite: true # Push the resulting binaries to pypi on a tag starting with 'v' upload_pypi: name: Upload release to PyPI # only run if the most recent commit contains '[ci publish]' if: "contains(github.event.head_commit.message, '[ci publish]')" needs: [build_wheels] runs-on: ubuntu-latest environment: name: pypi url: https://pypi.org/p/robust-laplacian/ permissions: # we authenticate via PyPI's 'trusted publisher' workflow, this permission is required id-token: write steps: - name: Download built wheels artifact # downloads from the jobs storage from the previous step uses: actions/download-artifact@v4.2.1 with: # omitting the `name: ` field downloads all artifacts from this workflow path: dist - name: List downloaded files from artifact run: ls -lR dist # dist directory has subdirs from the different jobs, merge them into one directory and delete # the empty leftover dirs - name: Flatten directory run: find dist -mindepth 2 -type f -exec mv -t dist {} + && find dist -type d -empty -delete - name: List downloaded files from artifact after flatten run: ls -lR dist - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@release/v1 # with: # To test: repository_url: https://test.pypi.org/legacy/ 
================================================ FILE: .github/workflows/test_build.yml ================================================ name: Test Build # NOTE: build logic is duplicated here and in publish.yml # Run on the master branch commit push and PRs to master (note conditional below) on: push: branches: - master pull_request: branches: - master jobs: build_wheels: # Only run if the commit message contains '[ci build]' if: "contains(toJSON(github.event.commits.*.message), '[ci build]') || contains(toJSON(github.event.pull_request.title), '[ci build]')" strategy: fail-fast: false matrix: os: [ubuntu-latest, ubuntu-24.04-arm, windows-latest, macos-15-intel, macos-latest] name: Build wheels ${{ matrix.os }} runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 with: submodules: 'recursive' - uses: actions/setup-python@v6 with: python-version: '3.12' - name: Package source distribution # make sure this only happens on one of the runners, not repeated on all if: matrix.os == 'ubuntu-latest' run: | python -m pip install build python -m build --sdist - name: Run cibuildwheel uses: pypa/cibuildwheel@v3.3.1 with: config-file: ".github/workflows/cibuildwheel_config.toml" - name: Copy source distribution into wheelhouse if: matrix.os == 'ubuntu-latest' run: mv dist/*.tar.gz wheelhouse/ # Upload binaries to the github artifact store - name: Upload wheels uses: actions/upload-artifact@v4 with: name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} path: | ./wheelhouse/*.whl ./wheelhouse/*.tar.gz overwrite: true # Upload binaries to the github artifact store - name: Upload wheels uses: actions/upload-artifact@v4 with: name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} path: | ./wheelhouse/*.whl ./wheelhouse/*.tar.gz overwrite: true ================================================ FILE: .github/workflows/test_linux.yml ================================================ name: Test Linux on: push: branches: [ master ] pull_request: branches: [ master ] 
jobs: build: runs-on: ubuntu-latest if: "! contains(toJSON(github.event.commits.*.message), '[ci skip]')" steps: - uses: actions/checkout@v1 with: submodules: 'recursive' - uses: actions/setup-python@v5 name: Install Python with: python-version: '3.9' - name: install python packages run: python3 -m pip install numpy scipy - name: configure run: mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") .. - name: build run: cd build && make - name: run test run: python3 test/robust_laplacian_test.py ================================================ FILE: .github/workflows/test_macos.yml ================================================ name: Test macOS on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: runs-on: macos-latest if: "! contains(toJSON(github.event.commits.*.message), '[ci skip]')" steps: - uses: actions/checkout@v1 with: submodules: 'recursive' - uses: actions/setup-python@v5 name: Install Python with: python-version: '3.9' - name: install python packages run: python3 -m pip install numpy scipy - name: configure run: mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug .. - name: build run: cd build && make - name: run test run: python3 test/robust_laplacian_test.py ================================================ FILE: .github/workflows/test_windows.yml ================================================ name: Test Windows on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: runs-on: windows-latest if: "! contains(toJSON(github.event.commits.*.message), '[ci skip]')" steps: - uses: actions/checkout@v1 with: submodules: 'recursive' - uses: actions/setup-python@v5 name: Install Python with: python-version: '3.9' - name: install python packages run: python -m pip install numpy scipy - name: configure run: mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Debug .. - name: build run: cd build && cmake --build "." 
- name: run test run: python test/robust_laplacian_test.py ================================================ FILE: .gitignore ================================================ # Build directories build/ build_debug/ dist/ *.pyc __pycache__/ *.egg-info # Editor and OS things imgui.ini .polyscope.ini .DS_Store .vscode *.swp tags *.blend1 # Prerequisites *.d # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod *.smod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app ================================================ FILE: .gitmodules ================================================ [submodule "deps/geometry-central"] path = deps/geometry-central url = https://github.com/nmwsharp/geometry-central.git [submodule "deps/pybind11"] path = deps/pybind11 url = https://github.com/pybind/pybind11.git [submodule "deps/eigen"] path = deps/eigen url = https://gitlab.com/libeigen/eigen.git ================================================ FILE: CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.5.0) project(robust-laplacian-py) # Recurse in to pybind set(PYBIND11_NEWPYTHON ON) add_subdirectory(deps/pybind11) # set location of eigen for geometry-central set(GC_EIGEN_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/deps/eigen" CACHE PATH "my path") # geometry-central set(CMAKE_POSITION_INDEPENDENT_CODE ON) add_subdirectory(deps/geometry-central) pybind11_add_module(robust_laplacian_bindings src/cpp/point_cloud_utilities.cpp src/cpp/core.cpp ) include_directories(robust_laplacian_bindings ${CMAKE_CURRENT_SOURCE_DIR}/src/cpp) include_directories(robust_laplacian_bindings ${CMAKE_CURRENT_SOURCE_DIR}/deps/jc_voronoi/include) target_link_libraries(robust_laplacian_bindings PRIVATE geometry-central) install(TARGETS robust_laplacian_bindings LIBRARY DESTINATION .) 
================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2020 Nicholas Sharp Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: MANIFEST.in ================================================ include README.md LICENSE include CMakeLists.txt recursive-include deps/geometry-central *.cpp *.h *.ipp *.hpp *.cmake CMakeLists.txt recursive-include deps/jc_voronoi * recursive-include deps/eigen/Eigen * include deps/eigen/COPYING* include deps/eigen/README.md recursive-include deps/pybind11 *.h CMakeLists.txt *.cmake recursive-include src *.cpp *.h recursive-include src/robust-laplacian *.py exclude dist ================================================ FILE: README.md ================================================ [![actions status linux](https://github.com/nmwsharp/robust-laplacians-py/workflows/Test%20Linux/badge.svg)](https://github.com/nmwsharp/robust-laplacians-py/actions) [![actions status macOS](https://github.com/nmwsharp/robust-laplacians-py/workflows/Test%20macOS/badge.svg)](https://github.com/nmwsharp/robust-laplacians-py/actions) [![actions status windows](https://github.com/nmwsharp/robust-laplacians-py/workflows/Test%20Windows/badge.svg)](https://github.com/nmwsharp/robust-laplacians-py/actions) [![PyPI](https://img.shields.io/pypi/v/robust-laplacian?style=plastic)](https://pypi.org/project/robust-laplacian/) A Python package for high-quality Laplace matrices on meshes and point clouds. `pip install robust_laplacian` The Laplacian is at the heart of many algorithms across geometry processing, simulation, and machine learning. This library builds a high-quality, robust Laplace matrix which often improves the performance of these algorithms, and wraps it all up in a simple, single-function API! 
**Sample**: computing eigenvectors of the point cloud Laplacian ![demo image of eigenvectors on point cloud](https://github.com/nmwsharp/robust-laplacians-py/blob/master/teaser_cloud.jpg?raw=true) Given as input a triangle mesh with arbitrary connectivity (could be nonmanifold, have boundary, etc), OR a point cloud, this library builds an `NxN` sparse Laplace matrix, where `N` is the number of vertices/points. This Laplace matrix is similar to the _cotan-Laplacian_ used widely in geometric computing, but internally the algorithm constructs an _intrinsic Delaunay triangulation_ of the surface, which gives the Laplace matrix great numerical properties. The resulting Laplacian is always a symmetric positive-definite matrix, with all positive edge weights. Additionally, this library performs _intrinsic mollification_ to alleviate floating-point issues with degenerate triangles. The resulting Laplace matrix `L` is a "weak" Laplace matrix, so we also generate a diagonal lumped mass matrix `M`, where each diagonal entry holds an area associated with the mesh element. The "strong" Laplacian can then be formed as `M^-1 L`, or a Poisson problem could be solved as `L x = M y`. A [C++ implementation and demo](https://github.com/nmwsharp/nonmanifold-laplacian) is available. This library implements the algorithm described in [A Laplacian for Nonmanifold Triangle Meshes](http://www.cs.cmu.edu/~kmcrane/Projects/NonmanifoldLaplace/NonmanifoldLaplace.pdf) by [Nicholas Sharp](http://nmwsharp.com) and [Keenan Crane](http://keenan.is/here) at SGP 2020 (where it won a best paper award!). See the paper for more details, and please use the citation given at the bottom if it contributes to academic work. 
### Example Build a point cloud Laplacian, compute its first 10 eigenvectors, and visualize with [Polyscope](https://polyscope.run/py/) ```shell pip install numpy scipy plyfile polyscope robust_laplacian ``` ```py import robust_laplacian from plyfile import PlyData import numpy as np import polyscope as ps import scipy.sparse.linalg as sla # Read input plydata = PlyData.read("/path/to/cloud.ply") points = np.vstack(( plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z'] )).T # Build point cloud Laplacian L, M = robust_laplacian.point_cloud_laplacian(points) # (or for a mesh) # L, M = robust_laplacian.mesh_laplacian(verts, faces) # Compute some eigenvectors n_eig = 10 evals, evecs = sla.eigsh(L, n_eig, M, sigma=1e-8) # Visualize ps.init() ps_cloud = ps.register_point_cloud("my cloud", points) for i in range(n_eig): ps_cloud.add_scalar_quantity("eigenvector_"+str(i), evecs[:,i], enabled=True) ps.show() ``` **_NOTE:_** No one can agree on the sign convention for the Laplacian. This library builds the _positive semi-definite_ Laplace matrix, where the diagonal entries are positive and off-diagonal entries are negative. This is the _opposite_ of the sign used by e.g. libIGL in `igl.cotmatrix`, so you may need to flip a sign when converting code. ### API This package exposes just two functions: - `mesh_laplacian(verts, faces, mollify_factor=1e-5)` - `verts` is an `V x 3` numpy array of vertex positions - `faces` is an `F x 3` numpy array of face indices, where each is a 0-based index referring to a vertex - `mollify_factor` amount of intrinsic mollifcation to perform. `0` disables, larger values will increase numerical stability, while very large values will slightly implicitly smooth out the geometry. The range of reasonable settings is roughly `0` to `1e-3`. The default value should usually be sufficient. 
- `return L, M` a pair of scipy sparse matrices for the Laplacian `L` and mass matrix `M` - `point_cloud_laplacian(points, mollify_factor=1e-5, n_neighbors=30)` - `points` is an `V x 3` numpy array of point positions - `mollify_factor` amount of intrinsic mollifcation to perform. `0` disables, larger values will increase numerical stability, while very large values will slightly implicitly smooth out the geometry. The range of reasonable settings is roughly `0` to `1e-3`. The default value should usually be sufficient. - `n_neighbors` is the number of nearest neighbors to use when constructing local triangulations. This parameter has little effect on the resulting matrices, and the default value is almost always sufficient. - `return L, M` a pair of scipy sparse matrices for the Laplacian `L` and mass matrix `M` ### Installation The package is availabe via `pip` ``` pip install robust_laplacian ``` The underlying algorithm is implemented in C++; the pypi entry includes precompiled binaries for many platforms. Very old versions of `pip` might need to be upgraded like `pip install pip --upgrade` to use the precompiled binaries. Alternately, if no precompiled binary matches your system `pip` will attempt to compile from source on your machine. This requires a working C++ toolchain, including cmake. ### Known limitations - For point clouds, this repo uses a simple method to generate planar Delaunay triangulations, which may not be totally robust to collinear or degenerate point clouds. ### Dependencies This python library is mainly a wrapper around the implementation in the [geometry-central](http://geometry-central.net) library; see there for further dependencies. Additionally, this library uses [pybind11](https://github.com/pybind/pybind11) to generate bindings, and [jc_voronoi](https://github.com/JCash/voronoi) for 2D Delaunay triangulation on point clouds. All are permissively licensed. 
### Citation ``` @article{Sharp:2020:LNT, author={Nicholas Sharp and Keenan Crane}, title={{A Laplacian for Nonmanifold Triangle Meshes}}, journal={Computer Graphics Forum (SGP)}, volume={39}, number={5}, year={2020} } ``` ### For developers This repo is configured with CI on github actions to build wheels across platform. ### Deploy a new version - Commit the desired updates to the `master` branch (or via PR). Include the string `[ci build]` in the commit message to ensure a build happens. - Watch the github actions builds to ensure the test & build stages succeed and all wheels are compiled. - While you're waiting, update the docs. - Create a commit bumping the version in `pyproject.toml`. Include the string `[ci publish]` in the commit message and push. This will kick off a new github actions build which deploys the wheels to PyPI after compilation. Use the github UI to create a new release + tag matching the version in `pyproject.toml`. ================================================ FILE: deps/jc_voronoi/include/jc_voronoi/jc_voronoi.h ================================================ // Copyright (c) 2015-2019 Mathias Westerdahl // For LICENSE (MIT), USAGE or HISTORY, see bottom of file #ifndef JC_VORONOI_H #define JC_VORONOI_H #include #include #include #include #ifdef __cplusplus extern "C" { #endif #ifndef JCV_REAL_TYPE #define JCV_REAL_TYPE float #endif #ifndef JCV_ATAN2 #define JCV_ATAN2(_Y_, _X_) atan2f(_Y_, _X_) #endif #ifndef JCV_SQRT #define JCV_SQRT(_X_) sqrtf(_X_) #endif #ifndef JCV_PI #define JCV_PI 3.14159265358979323846264338327950288f #endif #ifndef JCV_FLT_MAX #define JCV_FLT_MAX 3.402823466e+38F #endif #ifndef JCV_EDGE_INTERSECT_THRESHOLD // Fix for Issue #40 #define JCV_EDGE_INTERSECT_THRESHOLD 1.0e-10F #endif typedef JCV_REAL_TYPE jcv_real; typedef struct _jcv_point jcv_point; typedef struct _jcv_rect jcv_rect; typedef struct _jcv_site jcv_site; typedef struct _jcv_edge jcv_edge; typedef struct _jcv_graphedge jcv_graphedge; typedef struct 
_jcv_diagram jcv_diagram; typedef struct _jcv_clipper jcv_clipper; typedef struct _jcv_context_internal jcv_context_internal; /// Tests if a point is inside the final shape typedef int (*jcv_clip_test_point_fn)(const jcv_clipper* clipper, const jcv_point p); /** Given an edge, and the clipper, calculates the e->pos[0] and e->pos[1] * Returns 0 if not successful */ typedef int (*jcv_clip_edge_fn)(const jcv_clipper* clipper, jcv_edge* e); /** Given the clipper, the site and the last edge, * closes any gaps in the polygon by adding new edges that follow the bounding shape * The internal context is use when allocating new edges. */ typedef void (*jcv_clip_fillgap_fn)(const jcv_clipper* clipper, jcv_context_internal* allocator, jcv_site* s); /** * Uses malloc * If a clipper is not supplied, a default box clipper will be used * If rect is null, an automatic bounding box is calculated, with an extra padding of 10 units * All points will be culled against the bounding rect, and all edges will be clipped against it. */ extern void jcv_diagram_generate( int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, jcv_diagram* diagram ); typedef void* (*FJCVAllocFn)(void* userctx, size_t size); typedef void (*FJCVFreeFn)(void* userctx, void* p); // Same as above, but allows the client to use a custom allocator extern void jcv_diagram_generate_useralloc( int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, void* userallocctx, FJCVAllocFn allocfn, FJCVFreeFn freefn, jcv_diagram* diagram ); // Uses free (or the registered custom free function) extern void jcv_diagram_free( jcv_diagram* diagram ); // Returns an array of sites, where each index is the same as the original input point array. extern const jcv_site* jcv_diagram_get_sites( const jcv_diagram* diagram ); // Returns a linked list of all the voronoi edges // excluding the ones that lie on the borders of the bounding box. 
// For a full list of edges, you need to iterate over the sites, and their graph edges. extern const jcv_edge* jcv_diagram_get_edges( const jcv_diagram* diagram ); // Iterates over a list of edges, skipping invalid edges (where p0==p1) extern const jcv_edge* jcv_diagram_get_next_edge( const jcv_edge* edge ); // For the default clipper extern int jcv_boxshape_test(const jcv_clipper* clipper, const jcv_point p); extern int jcv_boxshape_clip(const jcv_clipper* clipper, jcv_edge* e); extern void jcv_boxshape_fillgaps(const jcv_clipper* clipper, jcv_context_internal* allocator, jcv_site* s); #pragma pack(push, 1) struct _jcv_point { jcv_real x; jcv_real y; }; struct _jcv_graphedge { struct _jcv_graphedge* next; struct _jcv_edge* edge; struct _jcv_site* neighbor; jcv_point pos[2]; jcv_real angle; }; struct _jcv_site { jcv_point p; int index; // Index into the original list of points jcv_graphedge* edges; // The half edges owned by the cell }; // The coefficients a, b and c are from the general line equation: ax * by + c = 0 struct _jcv_edge { struct _jcv_edge* next; jcv_site* sites[2]; jcv_point pos[2]; jcv_real a; jcv_real b; jcv_real c; }; struct _jcv_rect { jcv_point min; jcv_point max; }; struct _jcv_clipper { jcv_clip_test_point_fn test_fn; jcv_clip_edge_fn clip_fn; jcv_clip_fillgap_fn fill_fn; jcv_point min; // The bounding rect min jcv_point max; // The bounding rect max void* ctx; // User defined context }; struct _jcv_diagram { jcv_context_internal* internal; jcv_edge* edges; jcv_site* sites; int numsites; jcv_point min; jcv_point max; }; #pragma pack(pop) #ifdef __cplusplus } #endif #endif // JC_VORONOI_H #ifdef JC_VORONOI_IMPLEMENTATION #undef JC_VORONOI_IMPLEMENTATION #include // INTERNAL FUNCTIONS #if defined(_MSC_VER) && !defined(__cplusplus) #define inline __inline #endif // jcv_point static inline int jcv_point_cmp(const void* p1, const void* p2) { const jcv_point* s1 = (const jcv_point*) p1; const jcv_point* s2 = (const jcv_point*) p2; return (s1->y != 
s2->y) ? (s1->y < s2->y ? -1 : 1) : (s1->x < s2->x ? -1 : 1); } static inline int jcv_point_less( const jcv_point* pt1, const jcv_point* pt2 ) { return (pt1->y == pt2->y) ? (pt1->x < pt2->x) : pt1->y < pt2->y; } static inline int jcv_point_eq( const jcv_point* pt1, const jcv_point* pt2 ) { return (pt1->y == pt2->y) && (pt1->x == pt2->x); } static inline int jcv_point_on_box_edge( const jcv_point* pt, const jcv_point* min, const jcv_point* max ) { return pt->x == min->x || pt->y == min->y || pt->x == max->x || pt->y == max->y; } static inline jcv_real jcv_point_dist_sq( const jcv_point* pt1, const jcv_point* pt2) { jcv_real diffx = pt1->x - pt2->x; jcv_real diffy = pt1->y - pt2->y; return diffx * diffx + diffy * diffy; } static inline jcv_real jcv_point_dist( const jcv_point* pt1, const jcv_point* pt2 ) { return (jcv_real)(JCV_SQRT(jcv_point_dist_sq(pt1, pt2))); } // Structs #pragma pack(push, 1) typedef struct _jcv_halfedge { jcv_edge* edge; struct _jcv_halfedge* left; struct _jcv_halfedge* right; jcv_point vertex; jcv_real y; int direction; // 0=left, 1=right int pqpos; } jcv_halfedge; typedef struct _jcv_memoryblock { size_t sizefree; struct _jcv_memoryblock* next; char* memory; } jcv_memoryblock; typedef int (*FJCVPriorityQueuePrint)(const void* node, int pos); typedef struct _jcv_priorityqueue { // Implements a binary heap int maxnumitems; int numitems; void** items; } jcv_priorityqueue; struct _jcv_context_internal { void* mem; jcv_edge* edges; jcv_halfedge* beachline_start; jcv_halfedge* beachline_end; jcv_halfedge* last_inserted; jcv_priorityqueue* eventqueue; jcv_site* sites; jcv_site* bottomsite; int numsites; int currentsite; int _padding; jcv_memoryblock* memblocks; jcv_edge* edgepool; jcv_halfedge* halfedgepool; void** eventmem; jcv_clipper clipper; void* memctx; // Given by the user FJCVAllocFn alloc; FJCVFreeFn free; jcv_rect rect; }; #pragma pack(pop) static const int JCV_DIRECTION_LEFT = 0; static const int JCV_DIRECTION_RIGHT = 1; static const 
jcv_real JCV_INVALID_VALUE = (jcv_real)-JCV_FLT_MAX; void jcv_diagram_free( jcv_diagram* d ) { jcv_context_internal* internal = d->internal; void* memctx = internal->memctx; FJCVFreeFn freefn = internal->free; while(internal->memblocks) { jcv_memoryblock* p = internal->memblocks; internal->memblocks = internal->memblocks->next; freefn( memctx, p ); } freefn( memctx, internal->mem ); } const jcv_site* jcv_diagram_get_sites( const jcv_diagram* diagram ) { return diagram->internal->sites; } const jcv_edge* jcv_diagram_get_edges( const jcv_diagram* diagram ) { jcv_edge e; e.next = diagram->internal->edges; return jcv_diagram_get_next_edge(&e); } const jcv_edge* jcv_diagram_get_next_edge( const jcv_edge* edge ) { const jcv_edge* e = edge->next; while (e != 0 && jcv_point_eq(&e->pos[0], &e->pos[1])) { e = e->next; } return e; } static void* jcv_alloc(jcv_context_internal* internal, size_t size) { if( !internal->memblocks || internal->memblocks->sizefree < size ) { size_t blocksize = 16 * 1024; jcv_memoryblock* block = (jcv_memoryblock*)internal->alloc( internal->memctx, blocksize ); size_t offset = sizeof(jcv_memoryblock); block->sizefree = blocksize - offset; block->next = internal->memblocks; block->memory = ((char*)block) + offset; internal->memblocks = block; } void* p = internal->memblocks->memory; internal->memblocks->memory += size; internal->memblocks->sizefree -= size; return p; } static jcv_edge* jcv_alloc_edge(jcv_context_internal* internal) { return (jcv_edge*)jcv_alloc(internal, sizeof(jcv_edge)); } static jcv_halfedge* jcv_alloc_halfedge(jcv_context_internal* internal) { if( internal->halfedgepool ) { jcv_halfedge* edge = internal->halfedgepool; internal->halfedgepool = internal->halfedgepool->right; return edge; } return (jcv_halfedge*)jcv_alloc(internal, sizeof(jcv_halfedge)); } static jcv_graphedge* jcv_alloc_graphedge(jcv_context_internal* internal) { return (jcv_graphedge*)jcv_alloc(internal, sizeof(jcv_graphedge)); } static void* jcv_alloc_fn(void* 
memctx, size_t size) { (void)memctx; return malloc(size); } static void jcv_free_fn(void* memctx, void* p) { (void)memctx; free(p); } // jcv_edge static inline int jcv_is_valid(const jcv_point* p) { return (p->x != JCV_INVALID_VALUE || p->y != JCV_INVALID_VALUE) ? 1 : 0; } static void jcv_edge_create(jcv_edge* e, jcv_site* s1, jcv_site* s2) { e->next = 0; e->sites[0] = s1; e->sites[1] = s2; e->pos[0].x = JCV_INVALID_VALUE; e->pos[0].y = JCV_INVALID_VALUE; e->pos[1].x = JCV_INVALID_VALUE; e->pos[1].y = JCV_INVALID_VALUE; // Create line equation between S1 and S2: // jcv_real a = -1 * (s2->p.y - s1->p.y); // jcv_real b = s2->p.x - s1->p.x; // //jcv_real c = -1 * (s2->p.x - s1->p.x) * s1->p.y + (s2->p.y - s1->p.y) * s1->p.x; // // // create perpendicular line // jcv_real pa = b; // jcv_real pb = -a; // //jcv_real pc = pa * s1->p.x + pb * s1->p.y; // // // Move to the mid point // jcv_real mx = s1->p.x + dx * jcv_real(0.5); // jcv_real my = s1->p.y + dy * jcv_real(0.5); // jcv_real pc = ( pa * mx + pb * my ); jcv_real dx = s2->p.x - s1->p.x; jcv_real dy = s2->p.y - s1->p.y; int dx_is_larger = (dx*dx) > (dy*dy); // instead of fabs // Simplify it, using dx and dy e->c = dx * (s1->p.x + dx * (jcv_real)0.5) + dy * (s1->p.y + dy * (jcv_real)0.5); if( dx_is_larger ) { e->a = (jcv_real)1; e->b = dy / dx; e->c /= dx; } else { e->a = dx / dy; e->b = (jcv_real)1; e->c /= dy; } } // CLIPPING int jcv_boxshape_test(const jcv_clipper* clipper, const jcv_point p) { return p.x >= clipper->min.x && p.x <= clipper->max.x && p.y >= clipper->min.y && p.y <= clipper->max.y; } // The line equation: ax + by + c = 0 // see jcv_edge_create int jcv_boxshape_clip(const jcv_clipper* clipper, jcv_edge* e) { jcv_real pxmin = clipper->min.x; jcv_real pxmax = clipper->max.x; jcv_real pymin = clipper->min.y; jcv_real pymax = clipper->max.y; jcv_real x1, y1, x2, y2; jcv_point* s1; jcv_point* s2; if (e->a == (jcv_real)1 && e->b >= (jcv_real)0) { s1 = jcv_is_valid(&e->pos[1]) ? 
&e->pos[1] : 0; s2 = jcv_is_valid(&e->pos[0]) ? &e->pos[0] : 0; } else { s1 = jcv_is_valid(&e->pos[0]) ? &e->pos[0] : 0; s2 = jcv_is_valid(&e->pos[1]) ? &e->pos[1] : 0; }; if (e->a == (jcv_real)1) // delta x is larger { y1 = pymin; if (s1 != 0 && s1->y > pymin) { y1 = s1->y; } if( y1 > pymax ) { y1 = pymax; } x1 = e->c - e->b * y1; y2 = pymax; if (s2 != 0 && s2->y < pymax) y2 = s2->y; if( y2 < pymin ) { y2 = pymin; } x2 = (e->c) - (e->b) * y2; // Never occurs according to lcov // if( ((x1 > pxmax) & (x2 > pxmax)) | ((x1 < pxmin) & (x2 < pxmin)) ) // { // return 0; // } if (x1 > pxmax) { x1 = pxmax; y1 = (e->c - x1) / e->b; } else if (x1 < pxmin) { x1 = pxmin; y1 = (e->c - x1) / e->b; } if (x2 > pxmax) { x2 = pxmax; y2 = (e->c - x2) / e->b; } else if (x2 < pxmin) { x2 = pxmin; y2 = (e->c - x2) / e->b; } } else // delta y is larger { x1 = pxmin; if( s1 != 0 && s1->x > pxmin ) x1 = s1->x; if( x1 > pxmax ) { x1 = pxmax; } y1 = e->c - e->a * x1; x2 = pxmax; if( s2 != 0 && s2->x < pxmax ) x2 = s2->x; if( x2 < pxmin ) { x2 = pxmin; } y2 = e->c - e->a * x2; // Never occurs according to lcov // if( ((y1 > pymax) & (y2 > pymax)) | ((y1 < pymin) & (y2 < pymin)) ) // { // return 0; // } if( y1 > pymax ) { y1 = pymax; x1 = (e->c - y1) / e->a; } else if( y1 < pymin ) { y1 = pymin; x1 = (e->c - y1) / e->a; } if( y2 > pymax ) { y2 = pymax; x2 = (e->c - y2) / e->a; } else if( y2 < pymin ) { y2 = pymin; x2 = (e->c - y2) / e->a; }; }; e->pos[0].x = x1; e->pos[0].y = y1; e->pos[1].x = x2; e->pos[1].y = y2; // If the two points are equal, the result is invalid return (x1 == x2 && y1 == y2) ? 
0 : 1; } // The line equation: ax + by + c = 0 // see jcv_edge_create static int jcv_edge_clipline(jcv_context_internal* internal, jcv_edge* e) { return internal->clipper.clip_fn(&internal->clipper, e); } static jcv_edge* jcv_edge_new(jcv_context_internal* internal, jcv_site* s1, jcv_site* s2) { jcv_edge* e = jcv_alloc_edge(internal); jcv_edge_create(e, s1, s2); return e; } // jcv_halfedge static void jcv_halfedge_link(jcv_halfedge* edge, jcv_halfedge* newedge) { newedge->left = edge; newedge->right = edge->right; edge->right->left = newedge; edge->right = newedge; } static inline void jcv_halfedge_unlink(jcv_halfedge* he) { he->left->right = he->right; he->right->left = he->left; he->left = 0; he->right = 0; } static inline jcv_halfedge* jcv_halfedge_new(jcv_context_internal* internal, jcv_edge* e, int direction) { jcv_halfedge* he = jcv_alloc_halfedge(internal); he->edge = e; he->left = 0; he->right = 0; he->direction = direction; he->pqpos = 0; // These are set outside //he->y //he->vertex return he; } static void jcv_halfedge_delete(jcv_context_internal* internal, jcv_halfedge* he) { he->right = internal->halfedgepool; internal->halfedgepool = he; } static inline jcv_site* jcv_halfedge_leftsite(const jcv_halfedge* he) { return he->edge->sites[he->direction]; } static inline jcv_site* jcv_halfedge_rightsite(const jcv_halfedge* he) { return he->edge ? he->edge->sites[1 - he->direction] : 0; } static int jcv_halfedge_rightof(const jcv_halfedge* he, const jcv_point* p) { const jcv_edge* e = he->edge; const jcv_site* topsite = e->sites[1]; int right_of_site = (p->x > topsite->p.x) ? 
1 : 0; if (right_of_site && he->direction == JCV_DIRECTION_LEFT) return 1; if (!right_of_site && he->direction == JCV_DIRECTION_RIGHT) return 0; jcv_real dxp, dyp, dxs, t1, t2, t3, yl; int above; if (e->a == (jcv_real)1) { dyp = p->y - topsite->p.y; dxp = p->x - topsite->p.x; int fast = 0; if( (!right_of_site & (e->b < (jcv_real)0)) | (right_of_site & (e->b >= (jcv_real)0)) ) { above = dyp >= e->b * dxp; fast = above; } else { above = (p->x + p->y * e->b) > e->c; if (e->b < (jcv_real)0) above = !above; if (!above) fast = 1; }; if (!fast) { dxs = topsite->p.x - e->sites[0]->p.x; above = e->b * (dxp * dxp - dyp * dyp) < dxs * dyp * ((jcv_real)1 + (jcv_real)2 * dxp / dxs + e->b * e->b); if (e->b < (jcv_real)0) above = !above; }; } else // e->b == 1 { yl = e->c - e->a * p->x; t1 = p->y - yl; t2 = p->x - topsite->p.x; t3 = yl - topsite->p.y; above = t1 * t1 > (t2 * t2 + t3 * t3); }; return (he->direction == JCV_DIRECTION_LEFT ? above : !above); } // Keeps the priority queue sorted with events sorted in ascending order // Return 1 if the edges needs to be swapped static inline int jcv_halfedge_compare( const jcv_halfedge* he1, const jcv_halfedge* he2 ) { return (he1->y == he2->y) ? 
he1->vertex.x > he2->vertex.x : he1->y > he2->y; } static int jcv_halfedge_intersect(const jcv_halfedge* he1, const jcv_halfedge* he2, jcv_point* out) { const jcv_edge* e1 = he1->edge; const jcv_edge* e2 = he2->edge; jcv_real d = e1->a * e2->b - e1->b * e2->a; if( ((jcv_real)-JCV_EDGE_INTERSECT_THRESHOLD < d && d < (jcv_real)JCV_EDGE_INTERSECT_THRESHOLD) ) { return 0; } out->x = (e1->c * e2->b - e1->b * e2->c) / d; out->y = (e1->a * e2->c - e1->c * e2->a) / d; const jcv_edge* e; const jcv_halfedge* he; if( jcv_point_less( &e1->sites[1]->p, &e2->sites[1]->p) ) { he = he1; e = e1; } else { he = he2; e = e2; } int right_of_site = out->x >= e->sites[1]->p.x; if ((right_of_site && he->direction == JCV_DIRECTION_LEFT) || (!right_of_site && he->direction == JCV_DIRECTION_RIGHT)) { return 0; } return 1; } // Priority queue static int jcv_pq_moveup(jcv_priorityqueue* pq, int pos) { jcv_halfedge** items = (jcv_halfedge**)pq->items; jcv_halfedge* node = items[pos]; for( int parent = (pos >> 1); pos > 1 && jcv_halfedge_compare(items[parent], node); pos = parent, parent = parent >> 1) { items[pos] = items[parent]; items[pos]->pqpos = pos; } node->pqpos = pos; items[pos] = node; return pos; } static int jcv_pq_maxchild(jcv_priorityqueue* pq, int pos) { int child = pos << 1; if( child >= pq->numitems ) return 0; jcv_halfedge** items = (jcv_halfedge**)pq->items; if( (child + 1) < pq->numitems && jcv_halfedge_compare(items[child], items[child+1]) ) return child+1; return child; } static int jcv_pq_movedown(jcv_priorityqueue* pq, int pos) { jcv_halfedge** items = (jcv_halfedge**)pq->items; jcv_halfedge* node = items[pos]; int child = jcv_pq_maxchild(pq, pos); while( child && jcv_halfedge_compare(node, items[child]) ) { items[pos] = items[child]; items[pos]->pqpos = pos; pos = child; child = jcv_pq_maxchild(pq, pos); } items[pos] = node; items[pos]->pqpos = pos; return pos; } static void jcv_pq_create(jcv_priorityqueue* pq, int capacity, void** buffer) { pq->maxnumitems = capacity; 
pq->numitems = 1; pq->items = buffer; } static int jcv_pq_empty(jcv_priorityqueue* pq) { return pq->numitems == 1 ? 1 : 0; } static int jcv_pq_push(jcv_priorityqueue* pq, void* node) { assert(pq->numitems < pq->maxnumitems); int n = pq->numitems++; pq->items[n] = node; return jcv_pq_moveup(pq, n); } static void* jcv_pq_pop(jcv_priorityqueue* pq) { void* node = pq->items[1]; pq->items[1] = pq->items[--pq->numitems]; jcv_pq_movedown(pq, 1); return node; } static void* jcv_pq_top(jcv_priorityqueue* pq) { return pq->items[1]; } static void jcv_pq_remove(jcv_priorityqueue* pq, jcv_halfedge* node) { if( pq->numitems == 1 ) return; int pos = node->pqpos; if( pos == 0 ) return; jcv_halfedge** items = (jcv_halfedge**)pq->items; items[pos] = items[--pq->numitems]; if( jcv_halfedge_compare( node, items[pos] ) ) jcv_pq_moveup( pq, pos ); else jcv_pq_movedown( pq, pos ); node->pqpos = pos; } // internal functions static inline jcv_site* jcv_nextsite(jcv_context_internal* internal) { return (internal->currentsite < internal->numsites) ? &internal->sites[internal->currentsite++] : 0; } static jcv_halfedge* jcv_get_edge_above_x(jcv_context_internal* internal, const jcv_point* p) { // Gets the arc on the beach line at the x coordinate (i.e. 
right above the new site event) // A good guess it's close by (Can be optimized) jcv_halfedge* he = internal->last_inserted; if( !he ) { if( p->x < (internal->rect.max.x - internal->rect.min.x) / 2 ) he = internal->beachline_start; else he = internal->beachline_end; } // if( he == internal->beachline_start || (he != internal->beachline_end && jcv_halfedge_rightof(he, p)) ) { do { he = he->right; } while( he != internal->beachline_end && jcv_halfedge_rightof(he, p) ); he = he->left; } else { do { he = he->left; } while( he != internal->beachline_start && !jcv_halfedge_rightof(he, p) ); } return he; } static int jcv_check_circle_event(const jcv_halfedge* he1, const jcv_halfedge* he2, jcv_point* vertex) { jcv_edge* e1 = he1->edge; jcv_edge* e2 = he2->edge; if( e1 == 0 || e2 == 0 || e1->sites[1] == e2->sites[1] ) { return 0; } return jcv_halfedge_intersect(he1, he2, vertex); } static void jcv_site_event(jcv_context_internal* internal, jcv_site* site) { jcv_halfedge* left = jcv_get_edge_above_x(internal, &site->p); jcv_halfedge* right = left->right; jcv_site* bottom = jcv_halfedge_rightsite(left); if( !bottom ) bottom = internal->bottomsite; jcv_edge* edge = jcv_edge_new(internal, bottom, site); edge->next = internal->edges; internal->edges = edge; jcv_halfedge* edge1 = jcv_halfedge_new(internal, edge, JCV_DIRECTION_LEFT); jcv_halfedge* edge2 = jcv_halfedge_new(internal, edge, JCV_DIRECTION_RIGHT); jcv_halfedge_link(left, edge1); jcv_halfedge_link(edge1, edge2); internal->last_inserted = right; jcv_point p; if( jcv_check_circle_event( left, edge1, &p ) ) { jcv_pq_remove(internal->eventqueue, left); left->vertex = p; left->y = p.y + jcv_point_dist(&site->p, &p); jcv_pq_push(internal->eventqueue, left); } if( jcv_check_circle_event( edge2, right, &p ) ) { edge2->vertex = p; edge2->y = p.y + jcv_point_dist(&site->p, &p); jcv_pq_push(internal->eventqueue, edge2); } } // https://cp-algorithms.com/geometry/oriented-triangle-area.html static inline jcv_real 
jcv_determinant(const jcv_point* a, const jcv_point* b, const jcv_point* c) {
    // Cross product (b-a) x (c-a): twice the signed area of triangle abc,
    // positive when the three points wind counter-clockwise.
    return (b->x - a->x) * (c->y - a->y) - (b->y - a->y) * (c->x - a->x);
}

// Sort key for a site's edge list: the angle of the edge's midpoint as seen
// from the site.
static inline jcv_real jcv_calc_sort_metric(const jcv_site* site, const jcv_graphedge* edge) {
    // We take the average of the two points, since we can better distinguish between very small edges
    jcv_real half = 1 / (jcv_real)2;
    jcv_real x = (edge->pos[0].x + edge->pos[1].x) * half;
    jcv_real y = (edge->pos[0].y + edge->pos[1].y) * half;
    jcv_real diffy = y - site->p.y;
    jcv_real angle = JCV_ATAN2(diffy, x - site->p.x);
    // Shift atan2's negative half-range up by 2*pi so all keys are comparable in [0, 2*pi).
    if (diffy < 0) angle = angle + 2 * JCV_PI;
    return (jcv_real)angle;
}

// Insert `edge` into the site's singly linked edge list, which is kept sorted
// by ascending angle (i.e. counter-clockwise around the site).
static void jcv_sortedges_insert(jcv_site* site, jcv_graphedge* edge) {
    // Special case for the head end
    if (site->edges == 0 || site->edges->angle >= edge->angle) {
        edge->next = site->edges;
        site->edges = edge;
    } else {
        // Locate the node before the point of insertion
        jcv_graphedge* current = site->edges;
        while (current->next != 0 && current->next->angle < edge->angle) {
            current = current->next;
        }
        edge->next = current->next;
        current->next = edge;
    }
}

// Clip a completed edge against the clipper; if a visible segment remains,
// build a graph edge for each of the two adjacent sites and insert it into
// that site's angle-sorted edge list.
static void jcv_finishline(jcv_context_internal* internal, jcv_edge* e) {
    if (!jcv_edge_clipline(internal, e)) {
        return;
    }

    // Make sure the graph edges are CCW
    int flip = jcv_determinant(&e->sites[0]->p, &e->pos[0], &e->pos[1]) > (jcv_real)0 ?
0 : 1;
    // Attach one graph edge to each of the two sites separated by this edge.
    for (int i = 0; i < 2; ++i) {
        jcv_graphedge* ge = jcv_alloc_graphedge(internal);

        ge->edge = e;
        ge->next = 0;
        ge->neighbor = e->sites[1 - i];
        // `flip` swaps the endpoints so the segment keeps CCW orientation for site i.
        ge->pos[flip] = e->pos[i];
        ge->pos[1 - flip] = e->pos[1 - i];
        ge->angle = jcv_calc_sort_metric(e->sites[i], ge);

        jcv_sortedges_insert(e->sites[i], ge);

        // check that we didn't accidentally add a duplicate (rare), then remove it
        if (ge->next && ge->angle == ge->next->angle) {
            if (jcv_point_eq(&ge->pos[0], &ge->next->pos[0]) && jcv_point_eq(&ge->pos[1], &ge->next->pos[1])) {
                ge->next = ge->next->next; // Throw it away, they're so few anyways
            }
        }
    }
}

// Record one endpoint of the edge; once both endpoints are valid the edge is
// complete and can be clipped and handed over to its sites.
static void jcv_endpos(jcv_context_internal* internal, jcv_edge* e, const jcv_point* p, int direction) {
    e->pos[direction] = *p;

    if (!jcv_is_valid(&e->pos[1 - direction])) return;

    jcv_finishline(internal, e);
}

// Fill a gap at a corner of the bounding rect: `gap` starts where `current`
// ends and runs to the next corner of internal->rect.
// NOTE(review): each branch assumes current->pos[1] lies exactly on one of
// the four box edges — confirm callers only pass border points.
static inline void jcv_create_corner_edge(jcv_context_internal* internal, const jcv_site* site, jcv_graphedge* current, jcv_graphedge* gap) {
    gap->neighbor = 0;
    gap->pos[0] = current->pos[1];

    if (current->pos[1].x < internal->rect.max.x && current->pos[1].y == internal->rect.min.y) {
        // On the min-y edge: continue to the (max.x, min.y) corner.
        gap->pos[1].x = internal->rect.max.x;
        gap->pos[1].y = internal->rect.min.y;
    } else if (current->pos[1].x > internal->rect.min.x && current->pos[1].y == internal->rect.max.y) {
        // On the max-y edge: continue to the (min.x, max.y) corner.
        gap->pos[1].x = internal->rect.min.x;
        gap->pos[1].y = internal->rect.max.y;
    } else if (current->pos[1].y > internal->rect.min.y && current->pos[1].x == internal->rect.min.x) {
        // On the min-x edge: continue to the (min.x, min.y) corner.
        gap->pos[1].x = internal->rect.min.x;
        gap->pos[1].y = internal->rect.min.y;
    } else if (current->pos[1].y < internal->rect.max.y && current->pos[1].x == internal->rect.max.x) {
        // On the max-x edge: continue to the (max.x, max.y) corner.
        gap->pos[1].x = internal->rect.max.x;
        gap->pos[1].y = internal->rect.max.y;
    }

    gap->angle = jcv_calc_sort_metric(site, gap);
}

// Create a jcv_edge mirroring the gap graph edge `ge`, so border/corner
// filler segments also show up in the diagram's global edge list.
static jcv_edge* jcv_create_gap_edge(jcv_context_internal* internal, jcv_site* site, jcv_graphedge* ge) {
    jcv_edge* edge = jcv_alloc_edge(internal);
    edge->pos[0] = ge->pos[0];
    edge->pos[1] = ge->pos[1];
    edge->sites[0] = site;
    edge->sites[1]
= 0; edge->a = edge->b = edge->c = 0; edge->next = internal->edges; internal->edges = edge; return edge; } void jcv_boxshape_fillgaps(const jcv_clipper* clipper, jcv_context_internal* allocator, jcv_site* site) { // They're sorted CCW, so if the current->pos[1] != next->pos[0], then we have a gap jcv_graphedge* current = site->edges; if( !current ) { // No edges, then it should be a single cell assert( allocator->numsites == 1 ); jcv_graphedge* gap = jcv_alloc_graphedge(allocator); gap->neighbor = 0; gap->pos[0] = clipper->min; gap->pos[1].x = clipper->max.x; gap->pos[1].y = clipper->min.y; gap->angle = jcv_calc_sort_metric(site, gap); gap->next = 0; gap->edge = jcv_create_gap_edge(allocator, site, gap); current = gap; site->edges = gap; } jcv_graphedge* next = current->next; if( !next ) { // Only one edge, then we assume it's a corner gap jcv_graphedge* gap = jcv_alloc_graphedge(allocator); jcv_create_corner_edge(allocator, site, current, gap); gap->edge = jcv_create_gap_edge(allocator, site, gap); gap->next = current->next; current->next = gap; current = gap; next = site->edges; } while( current && next ) { if( jcv_point_on_box_edge(¤t->pos[1], &clipper->min, &clipper->max) && !jcv_point_eq(¤t->pos[1], &next->pos[0]) ) { // Border gap if( current->pos[1].x == next->pos[0].x || current->pos[1].y == next->pos[0].y) { jcv_graphedge* gap = jcv_alloc_graphedge(allocator); gap->neighbor = 0; gap->pos[0] = current->pos[1]; gap->pos[1] = next->pos[0]; gap->angle = jcv_calc_sort_metric(site, gap); gap->edge = jcv_create_gap_edge(allocator, site, gap); gap->next = current->next; current->next = gap; } else if( jcv_point_on_box_edge(¤t->pos[1], &clipper->min, &clipper->max) && jcv_point_on_box_edge(&next->pos[0], &clipper->min, &clipper->max) ) { jcv_graphedge* gap = jcv_alloc_graphedge(allocator); jcv_create_corner_edge(allocator, site, current, gap); gap->edge = jcv_create_gap_edge(allocator, site, gap); gap->next = current->next; current->next = gap; } else { // 
something went wrong, abort instead of looping indefinitely break; } } current = current->next; if( current ) { next = current->next; if( !next ) next = site->edges; } } } // Since the algorithm leaves gaps at the borders/corner, we want to fill them static void jcv_fillgaps(jcv_diagram* diagram) { jcv_context_internal* internal = diagram->internal; if (!internal->clipper.fill_fn) return; for( int i = 0; i < internal->numsites; ++i ) { jcv_site* site = &internal->sites[i]; internal->clipper.fill_fn(&internal->clipper, internal, site); } } static void jcv_circle_event(jcv_context_internal* internal) { jcv_halfedge* left = (jcv_halfedge*)jcv_pq_pop(internal->eventqueue); jcv_halfedge* leftleft = left->left; jcv_halfedge* right = left->right; jcv_halfedge* rightright= right->right; jcv_site* bottom = jcv_halfedge_leftsite(left); jcv_site* top = jcv_halfedge_rightsite(right); jcv_point vertex = left->vertex; jcv_endpos(internal, left->edge, &vertex, left->direction); jcv_endpos(internal, right->edge, &vertex, right->direction); internal->last_inserted = rightright; jcv_pq_remove(internal->eventqueue, right); jcv_halfedge_unlink(left); jcv_halfedge_unlink(right); jcv_halfedge_delete(internal, left); jcv_halfedge_delete(internal, right); int direction = JCV_DIRECTION_LEFT; if( bottom->p.y > top->p.y ) { jcv_site* temp = bottom; bottom = top; top = temp; direction = JCV_DIRECTION_RIGHT; } jcv_edge* edge = jcv_edge_new(internal, bottom, top); edge->next = internal->edges; internal->edges = edge; jcv_halfedge* he = jcv_halfedge_new(internal, edge, direction); jcv_halfedge_link(leftleft, he); jcv_endpos(internal, edge, &vertex, JCV_DIRECTION_RIGHT - direction); jcv_point p; if( jcv_check_circle_event( leftleft, he, &p ) ) { jcv_pq_remove(internal->eventqueue, leftleft); leftleft->vertex = p; leftleft->y = p.y + jcv_point_dist(&bottom->p, &p); jcv_pq_push(internal->eventqueue, leftleft); } if( jcv_check_circle_event( he, rightright, &p ) ) { he->vertex = p; he->y = p.y + 
jcv_point_dist(&bottom->p, &p);
        jcv_pq_push(internal->eventqueue, he);
    }
}

// NOTE(review): these float helpers truncate via (int), so they are only
// exact while v fits in an int — acceptable for typical coordinate ranges.
static inline jcv_real jcv_floor(jcv_real v) {
    jcv_real i = (jcv_real)(int)v;
    return (v < i) ? i - 1 : i;
}

static inline jcv_real jcv_ceil(jcv_real v) {
    jcv_real i = (jcv_real)(int)v;
    return (v > i) ? i + 1 : i;
}

static inline jcv_real jcv_min(jcv_real a, jcv_real b) { return a < b ? a : b; }

static inline jcv_real jcv_max(jcv_real a, jcv_real b) { return a > b ? a : b; }

// Convenience entry point: generate the diagram using the default allocators.
void jcv_diagram_generate(int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, jcv_diagram* d) {
    jcv_diagram_generate_useralloc(num_points, points, rect, clipper, 0, jcv_alloc_fn, jcv_free_fn, d);
}

// Type-punning union: reinterpret a char* cursor as void** without a
// cast warning (used when carving up the single allocation).
typedef union _jcv_cast_align_struct {
    char* charp;
    void** voidpp;
} jcv_cast_align_struct;

// Grow `rect` so it also contains point p.
static inline void jcv_rect_union(jcv_rect* rect, const jcv_point* p) {
    rect->min.x = jcv_min(rect->min.x, p->x);
    rect->min.y = jcv_min(rect->min.y, p->y);
    rect->max.x = jcv_max(rect->max.x, p->x);
    rect->max.y = jcv_max(rect->max.y, p->y);
}

// Round the rect outwards to integral coordinates.
static inline void jcv_rect_round(jcv_rect* rect) {
    rect->min.x = jcv_floor(rect->min.x);
    rect->min.y = jcv_floor(rect->min.y);
    rect->max.x = jcv_ceil(rect->max.x);
    rect->max.y = jcv_ceil(rect->max.y);
}

// Expand the rect by `amount` on every side.
static inline void jcv_rect_inflate(jcv_rect* rect, jcv_real amount) {
    rect->min.x -= amount;
    rect->min.y -= amount;
    rect->max.x += amount;
    rect->max.y += amount;
}

// Remove duplicate sites in-place (the site array was sorted by the caller,
// so duplicates are adjacent) and compute the bounding rect of the surviving
// sites into *rect (if non-null). Returns the number of sites removed.
static int jcv_prune_duplicates(jcv_context_internal* internal, jcv_rect* rect) {
    int num_sites = internal->numsites;
    jcv_site* sites = internal->sites;

    jcv_rect r;
    r.min.x = r.min.y = JCV_FLT_MAX;
    r.max.x = r.max.y = -JCV_FLT_MAX;

    int offset = 0;
    // Prune duplicates first
    for (int i = 0; i < num_sites; i++) {
        const jcv_site* s = &sites[i];
        // Remove duplicates, to avoid anomalies
        if (i > 0 && jcv_point_eq(&s->p, &sites[i - 1].p)) {
            offset++;
            continue;
        }

        // Compact survivors toward the front of the array.
        sites[i - offset] = sites[i];
        jcv_rect_union(&r, &s->p);
    }
    internal->numsites -= offset;
    if (rect) {
        *rect = r;
    }
    return offset;
}

static int
jcv_prune_not_in_shape(jcv_context_internal* internal, jcv_rect* rect) { int num_sites = internal->numsites; jcv_site* sites = internal->sites; jcv_rect r; r.min.x = r.min.y = JCV_FLT_MAX; r.max.x = r.max.y = -JCV_FLT_MAX; int offset = 0; for (int i = 0; i < num_sites; i++) { const jcv_site* s = &sites[i]; if (!internal->clipper.test_fn(&internal->clipper, s->p)) { offset++; continue; } sites[i - offset] = sites[i]; jcv_rect_union(&r, &s->p); } internal->numsites -= offset; if (rect) { *rect = r; } return offset; } static jcv_context_internal* jcv_alloc_internal(int num_points, void* userallocctx, FJCVAllocFn allocfn, FJCVFreeFn freefn) { // Interesting limits from Euler's equation // Slide 81: https://courses.cs.washington.edu/courses/csep521/01au/lectures/lecture10slides.pdf // Page 3: https://sites.cs.ucsb.edu/~suri/cs235/Voronoi.pdf int max_num_events = num_points*2; // beachline can have max 2*n-5 parabolas size_t sitessize = (size_t)num_points * sizeof(jcv_site); size_t memsize = 8u + (size_t)max_num_events * sizeof(void*) + sizeof(jcv_priorityqueue) + sitessize + sizeof(jcv_context_internal); char* originalmem = (char*)allocfn(userallocctx, memsize); memset(originalmem, 0, memsize); // align memory char* mem = originalmem + 8 - ( (size_t)(originalmem) & 0x7); jcv_context_internal* internal = (jcv_context_internal*)mem; mem += sizeof(jcv_context_internal); internal->mem = originalmem; internal->memctx = userallocctx; internal->alloc = allocfn; internal->free = freefn; internal->sites = (jcv_site*) mem; mem += sitessize; internal->eventqueue = (jcv_priorityqueue*)mem; mem += sizeof(jcv_priorityqueue); jcv_cast_align_struct tmp; tmp.charp = mem; internal->eventmem = tmp.voidpp; return internal; } void jcv_diagram_generate_useralloc(int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, void* userallocctx, FJCVAllocFn allocfn, FJCVFreeFn freefn, jcv_diagram* d) { if( d->internal ) jcv_diagram_free( d ); jcv_context_internal* 
internal = jcv_alloc_internal(num_points, userallocctx, allocfn, freefn); internal->beachline_start = jcv_halfedge_new(internal, 0, 0); internal->beachline_end = jcv_halfedge_new(internal, 0, 0); internal->beachline_start->left = 0; internal->beachline_start->right = internal->beachline_end; internal->beachline_end->left = internal->beachline_start; internal->beachline_end->right = 0; internal->last_inserted = 0; int max_num_events = num_points*2; // beachline can have max 2*n-5 parabolas jcv_pq_create(internal->eventqueue, max_num_events, (void**)internal->eventmem); internal->numsites = num_points; jcv_site* sites = internal->sites; for( int i = 0; i < num_points; ++i ) { sites[i].p = points[i]; sites[i].edges = 0; sites[i].index = i; } qsort(sites, (size_t)num_points, sizeof(jcv_site), jcv_point_cmp); jcv_clipper box_clipper; if (clipper == 0) { box_clipper.test_fn = jcv_boxshape_test; box_clipper.clip_fn = jcv_boxshape_clip; box_clipper.fill_fn = jcv_boxshape_fillgaps; clipper = &box_clipper; } internal->clipper = *clipper; jcv_rect tmp_rect; tmp_rect.min.x = tmp_rect.min.y = JCV_FLT_MAX; tmp_rect.max.x = tmp_rect.max.y = -JCV_FLT_MAX; jcv_prune_duplicates(internal, &tmp_rect); // Prune using the test second if (internal->clipper.test_fn) { // e.g. used by the box clipper in the test_fn internal->clipper.min = rect ? rect->min : tmp_rect.min; internal->clipper.max = rect ? rect->max : tmp_rect.max; jcv_prune_not_in_shape(internal, &tmp_rect); // The pruning might have made the bounding box smaller if (!rect) { // In the case of all sites being all on a horizontal or vertical line, the // rect area will be zero, and the diagram generation will most likely fail jcv_rect_round(&tmp_rect); jcv_rect_inflate(&tmp_rect, 10); internal->clipper.min = tmp_rect.min; internal->clipper.max = tmp_rect.max; } } internal->rect = rect ? 
*rect : tmp_rect; d->min = internal->rect.min; d->max = internal->rect.max; d->numsites = internal->numsites; d->internal = internal; internal->bottomsite = jcv_nextsite(internal); jcv_priorityqueue* pq = internal->eventqueue; jcv_site* site = jcv_nextsite(internal); int finished = 0; while( !finished ) { jcv_point lowest_pq_point; if( !jcv_pq_empty(pq) ) { jcv_halfedge* he = (jcv_halfedge*)jcv_pq_top(pq); lowest_pq_point.x = he->vertex.x; lowest_pq_point.y = he->y; } if( site != 0 && (jcv_pq_empty(pq) || jcv_point_less(&site->p, &lowest_pq_point) ) ) { jcv_site_event(internal, site); site = jcv_nextsite(internal); } else if( !jcv_pq_empty(pq) ) { jcv_circle_event(internal); } else { finished = 1; } } for( jcv_halfedge* he = internal->beachline_start->right; he != internal->beachline_end; he = he->right ) { jcv_finishline(internal, he->edge); } jcv_fillgaps(d); } #endif // JC_VORONOI_IMPLEMENTATION /* ABOUT: A fast single file 2D voronoi diagram generator HISTORY: 0.7 2019-10-25 - Added support for clipping against convex polygons - Added JCV_EDGE_INTERSECT_THRESHOLD for edge intersections - Fixed issue where the bounds calculation wasn’t considering all points 0.6 2018-10-21 - Removed JCV_CEIL/JCV_FLOOR/JCV_FABS - Optimizations: Fewer indirections, better beach head approximation 0.5 2018-10-14 - Fixed issue where the graph edge had the wrong edge assigned (issue #28) - Fixed issue where a point was falsely passing the jcv_is_valid() test (issue #22) - Fixed jcv_diagram_get_edges() so it now returns _all_ edges (issue #28) - Added jcv_diagram_get_next_edge() to skip zero length edges (issue #10) - Added defines JCV_CEIL/JCV_FLOOR/JCV_FLT_MAX for easier configuration 0.4 2017-06-03 - Increased the max number of events that are preallocated 0.3 2017-04-16 - Added clipping box as input argument (Automatically calculated if needed) - Input points are pruned based on bounding box 0.2 2016-12-30 - Fixed issue of edges not being closed properly - Fixed issue when having 
many events - Fixed edge sorting - Code cleanup 0.1 Initial version LICENSE: The MIT License (MIT) Copyright (c) 2015-2019 Mathias Westerdahl Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. DISCLAIMER: This software is supplied "AS IS" without any warranties and support USAGE: The input points are pruned if * There are duplicate points * The input points are outside of the bounding box (i.e.
fail the clipping test function) * The input points are rejected by the clipper's test function The input bounding box is optional (calculated automatically) The input domain is (-FLT_MAX, FLT_MAX] (for floats) The API consists of these functions: void jcv_diagram_generate( int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, jcv_diagram* diagram ); void jcv_diagram_generate_useralloc( int num_points, const jcv_point* points, const jcv_rect* rect, const jcv_clipper* clipper, void* userallocctx, FJCVAllocFn allocfn, FJCVFreeFn freefn, jcv_diagram* diagram ); void jcv_diagram_free( jcv_diagram* diagram ); const jcv_site* jcv_diagram_get_sites( const jcv_diagram* diagram ); const jcv_edge* jcv_diagram_get_edges( const jcv_diagram* diagram ); const jcv_edge* jcv_diagram_get_next_edge( const jcv_edge* edge ); An example usage: #define JC_VORONOI_IMPLEMENTATION // If you wish to use doubles //#define JCV_REAL_TYPE double //#define JCV_ATAN2 atan2 //#define JCV_FLT_MAX 1.7976931348623157E+308 #include "jc_voronoi.h" void draw_edges(const jcv_diagram* diagram); void draw_cells(const jcv_diagram* diagram); void generate_and_draw(int numpoints, const jcv_point* points) { jcv_diagram diagram; memset(&diagram, 0, sizeof(jcv_diagram)); jcv_diagram_generate(numpoints, points, 0, 0, &diagram); draw_edges(&diagram); draw_cells(&diagram); jcv_diagram_free( &diagram ); } void draw_edges(const jcv_diagram* diagram) { // If all you need are the edges const jcv_edge* edge = jcv_diagram_get_edges( diagram ); while( edge ) { draw_line(edge->pos[0], edge->pos[1]); edge = jcv_diagram_get_next_edge(edge); } } void draw_cells(const jcv_diagram* diagram) { // If you want to draw triangles, or relax the diagram, // you can iterate over the sites and get all edges easily const jcv_site* sites = jcv_diagram_get_sites( diagram ); for( int i = 0; i < diagram->numsites; ++i ) { const jcv_site* site = &sites[i]; const jcv_graphedge* e =
site->edges; while( e ) { draw_triangle( site->p, e->pos[0], e->pos[1]); e = e->next; } } } // Here is a simple example of how to do the relaxations of the cells void relax_points(const jcv_diagram* diagram, jcv_point* points) { const jcv_site* sites = jcv_diagram_get_sites(diagram); for( int i = 0; i < diagram->numsites; ++i ) { const jcv_site* site = &sites[i]; jcv_point sum = site->p; int count = 1; const jcv_graphedge* edge = site->edges; while( edge ) { sum.x += edge->pos[0].x; sum.y += edge->pos[0].y; ++count; edge = edge->next; } points[site->index].x = sum.x / count; points[site->index].y = sum.y / count; } } */ ================================================ FILE: deps/jc_voronoi/include/jc_voronoi/jc_voronoi_clip.h ================================================ // Copyright (c) 2019 Mathias Westerdahl // For full LICENSE (MIT) or USAGE, see bottom of file #ifndef JC_VORONOI_CLIP_H #define JC_VORONOI_CLIP_H #include "jc_voronoi.h" #pragma pack(push, 1) typedef struct _jcv_clipping_polygon { jcv_point* points; int num_points; } jcv_clipping_polygon; #pragma pack(pop) // Convex polygon clip functions int jcv_clip_polygon_test_point(const jcv_clipper* clipper, const jcv_point p); int jcv_clip_polygon_clip_edge(const jcv_clipper* clipper, jcv_edge* e); void jcv_clip_polygon_fill_gaps(const jcv_clipper* clipper, jcv_context_internal* allocator, jcv_site* site); #endif // JC_VORONOI_CLIP_H #ifdef JC_VORONOI_CLIP_IMPLEMENTATION #undef JC_VORONOI_CLIP_IMPLEMENTATION // These helpers will probably end up in the main library static inline jcv_real jcv_cross(const jcv_point a, const jcv_point b) { return a.x * b.y - a.y * b.x; } static inline jcv_point jcv_add(jcv_point a, jcv_point b) { jcv_point r; r.x = a.x + b.x; r.y = a.y + b.y; return r; } static inline jcv_point jcv_sub(jcv_point a, jcv_point b) { jcv_point r; r.x = a.x - b.x; r.y = a.y - b.y; return r; } static inline jcv_point jcv_mul(jcv_point v, jcv_real s) { jcv_point r; r.x = v.x * s; r.y = v.y * s; 
return r; } static inline jcv_point jcv_mix(jcv_point a, jcv_point b, jcv_real t) { jcv_point r; r.x = a.x + (b.x - a.x) * t; r.y = a.y + (b.y - a.y) * t; return r; } static inline jcv_real jcv_dot(jcv_point a, jcv_point b) { return a.x * b.x + a.y * b.y; } static inline jcv_real jcv_length(jcv_point v) { return JCV_SQRT(v.x*v.x + v.y*v.y); } static inline jcv_real jcv_length_sq(jcv_point v) { return v.x*v.x + v.y*v.y; } static inline jcv_real jcv_fabs(jcv_real a) { return a < 0 ? -a : a; } // if it returns [0.0, 1.0] it's on the line segment static inline jcv_real jcv_point_to_line_segment_t(jcv_point p, jcv_point p0, jcv_point p1) { jcv_point vpoint = jcv_sub(p, p0); jcv_point vsegment = jcv_sub(p1, p0); return jcv_dot(vsegment, vpoint) / jcv_dot(vsegment, vsegment); } int jcv_clip_polygon_test_point(const jcv_clipper* clipper, const jcv_point p) { jcv_clipping_polygon* polygon = (jcv_clipping_polygon*)clipper->ctx; int num_points = polygon->num_points; // convex polygon // winding CCW // all polygon normals point outward // if the point is in front of the plane, it is outside int result = 1; for (int i = 0; i < num_points; ++i) { jcv_point p0 = polygon->points[i]; jcv_point p1 = polygon->points[(i+1)%num_points]; jcv_point n; n.x = p1.y - p0.y; n.y = p0.x - p1.x; jcv_point diff; diff.x = p.x - p0.x; diff.y = p.y - p0.y; if (jcv_dot(n, diff) > 0) { result = 0; break; } } return result; } static int jcv_ray_intersect_polygon(const jcv_clipper* clipper, jcv_point p0, jcv_point p1, jcv_real* out_t0, jcv_real* out_t1) { jcv_clipping_polygon* polygon = (jcv_clipping_polygon*)clipper->ctx; int num_points = polygon->num_points; jcv_real t0 = (jcv_real)0; jcv_real t1 = (jcv_real)1; jcv_point dir = jcv_sub(p1, p0); for (int i = 0; i < num_points; ++i) { jcv_point v0 = polygon->points[i]; jcv_point v1 = polygon->points[(i+1)%num_points]; jcv_point n; n.x = v1.y - v0.y; n.y = -(v1.x - v0.x); jcv_point v0p0 = jcv_sub(p0, v0); jcv_real N = -jcv_dot(v0p0, n); jcv_real D = 
jcv_dot(dir, n); if (jcv_fabs(D) < 0.0001f) // parallel to the line { if (N < 0) return 0; continue; } jcv_real t = N / D; if (D < 0) // -> entering { t0 = t > t0 ? t : t0; if (t0 > t1) return 0; } else // D > 0 -> exiting { t1 = t < t1 ? t : t1; if (t1 < t0) return 0; } } *out_t0 = t0; *out_t1 = t1; return 1; } int jcv_clip_polygon_clip_edge(const jcv_clipper* clipper, jcv_edge* e) { // Using the box clipper to get a finite line segment int result = jcv_boxshape_clip(clipper, e); if (!result) return 0; jcv_point p0 = e->pos[0]; jcv_point p1 = e->pos[1]; jcv_real t0; jcv_real t1; result = jcv_ray_intersect_polygon(clipper, p0, p1, &t0, &t1); if (!result) { e->pos[0] = e->pos[1]; return 0; } e->pos[0] = jcv_mix(p0, p1, t0); e->pos[1] = jcv_mix(p0, p1, t1); return 1; } // Find the edge which the point sits on static int jcv_find_polygon_edge(const jcv_clipper* clipper, jcv_point p) { jcv_clipping_polygon* polygon = (jcv_clipping_polygon*)clipper->ctx; int min_edge = -1; jcv_real min_dist = (jcv_real)1000000; int num_points = polygon->num_points; for (int i = 0; i < num_points; ++i) { jcv_point p0 = polygon->points[i]; if (jcv_point_eq(&p, &p0)) return i; jcv_point p1 = polygon->points[(i+1)%num_points]; jcv_point vsegment = jcv_sub(p1, p0); jcv_point vpoint = jcv_sub(p, p0); jcv_real t = jcv_dot(vsegment, vpoint) / jcv_dot(vsegment,vsegment); if (t < (jcv_real)0.0f || t > (jcv_real)1.0f) continue; jcv_point projected = jcv_add(p0, jcv_mul(vsegment, t)); jcv_real distsq = jcv_length_sq(jcv_sub(p, projected)); if (distsq < min_dist) { min_dist = distsq; min_edge = i; } } assert(min_edge >= 0); return min_edge; } void jcv_clip_polygon_fill_gaps(const jcv_clipper* clipper, jcv_context_internal* allocator, jcv_site* site) { // They're sorted CCW, so if the current->pos[1] != next->pos[0], then we have a gap jcv_clipping_polygon* polygon = (jcv_clipping_polygon*)clipper->ctx; int num_points = polygon->num_points; jcv_graphedge* current = site->edges; if( !current ) { 
jcv_graphedge* gap = jcv_alloc_graphedge(allocator); gap->neighbor = 0; // Pick the first edge of the polygon (which is also CCW) gap->pos[0] = polygon->points[0]; gap->pos[1] = polygon->points[1]; gap->angle = jcv_calc_sort_metric(site, gap); gap->next = 0; gap->edge = jcv_create_gap_edge(allocator, site, gap); current = gap; site->edges = gap; } jcv_graphedge* next = current->next; if( !next ) { jcv_graphedge* gap = jcv_alloc_graphedge(allocator); int polygon_edge = jcv_find_polygon_edge(clipper, current->pos[1]); if (!jcv_point_eq(¤t->pos[1], &polygon->points[(polygon_edge+1)%num_points])) { gap->pos[0] = current->pos[1]; gap->pos[1] = polygon->points[(polygon_edge+1)%num_points]; } else { gap->pos[0] = polygon->points[(polygon_edge+1)%num_points]; gap->pos[1] = polygon->points[(polygon_edge+2)%num_points]; } gap->neighbor = 0; gap->angle = jcv_calc_sort_metric(site, gap); gap->next = 0; gap->edge = jcv_create_gap_edge(allocator, site, gap); gap->next = current->next; current->next = gap; current = gap; next = site->edges; } while (current && next) { if (!jcv_point_eq(¤t->pos[1], &next->pos[0])) { int polygon_edge1 = jcv_find_polygon_edge(clipper, current->pos[1]); int polygon_edge2 = jcv_find_polygon_edge(clipper, next->pos[0]); jcv_graphedge* gap = jcv_alloc_graphedge(allocator); gap->pos[0] = current->pos[1]; if (polygon_edge1 != polygon_edge2) { gap->pos[1] = polygon->points[(polygon_edge1+1)%num_points]; } else { gap->pos[1] = next->pos[0]; } gap->neighbor = 0; gap->angle = jcv_calc_sort_metric(site, gap); gap->edge = jcv_create_gap_edge(allocator, site, gap); gap->next = current->next; current->next = gap; } current = current->next; if( current ) { next = current->next; if( !next ) { next = site->edges; } } } } #endif // JC_VORONOI_CLIP_IMPLEMENTATION /* ABOUT: Helper functions for clipping a vosonoi diagram against a convex polygon LICENSE: The MIT License (MIT) Copyright (c) 2019 Mathias Westerdahl Permission is hereby granted, free of charge, to any 
person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. DISCLAIMER: This software is supplied "AS IS" without any warranties and support USAGE: The `jcv_clipper` struct allows for supplying a set of custom clipper functions to interact with the generation of the resulting diagram.
#define JC_VORONOI_CLIP_IMPLEMENTATION #include "jc_voronoi_clip.h" jcv_clipping_polygon polygon; // Triangle polygon.num_points = 3; polygon.points = (jcv_point*)malloc(sizeof(jcv_point)*(size_t)polygon.num_points); polygon.points[0].x = width/2; polygon.points[1].x = width - width/5; polygon.points[2].x = width/5; polygon.points[0].y = height/5; polygon.points[1].y = height - height/5; polygon.points[2].y = height - height/5; jcv_clipper polygonclipper; polygonclipper.test_fn = jcv_clip_polygon_test_point; polygonclipper.clip_fn = jcv_clip_polygon_clip_edge; polygonclipper.fill_fn = jcv_clip_polygon_fill_gaps; polygonclipper.ctx = &polygon; jcv_diagram diagram; memset(&diagram, 0, sizeof(jcv_diagram)); jcv_diagram_generate(count, (const jcv_point*)points, 0, clipper, &diagram); */ ================================================ FILE: misc/conditional_deploy_build.py ================================================ import sys, os, subprocess if os.getenv("APPVEYOR_REPO_TAG") == "true": proc = subprocess.Popen(['python','-m', 'twine', 'upload','--skip-existing', 'wheelhouse/*']) proc.communicate() # wait for it to terminate # forward the return code code = proc.returncode sys.exit(code) else: print("not deploying, no appveyor repo tag present") ================================================ FILE: pyproject.toml ================================================ [project] name = "robust_laplacian" version = "1.1.0" description = "Robust Laplace matrices for meshes and point clouds" readme = "README.md" license.file = "LICENSE" authors = [ { name = "Nicholas Sharp", email = "nmwsharp@gmail.com" }, ] maintainers = [ { name = "Nicholas Sharp", email = "nmwsharp@gmail.com" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", ] requires-python = ">=3.9" dependencies = [ "numpy", ] [project.urls] Homepage = "https://github.com/nmwsharp/robust-laplacians-py" [build-system] 
requires = ["scikit-build-core"] build-backend = "scikit_build_core.build" [tool.scikit-build] build.verbose = true logging.level = "INFO" ================================================ FILE: src/cpp/.clang-format ================================================ --- AlignAfterOpenBracket: Align AlignOperands: 'true' AllowShortBlocksOnASingleLine: 'false' AllowShortIfStatementsOnASingleLine: 'true' AllowShortLoopsOnASingleLine: 'true' AlwaysBreakTemplateDeclarations: 'true' BinPackParameters: 'true' BreakBeforeBraces: Attach ColumnLimit: '120' IndentWidth: '2' KeepEmptyLinesAtTheStartOfBlocks: 'true' MaxEmptyLinesToKeep: '2' PointerAlignment: Left ReflowComments: 'true' SpacesInAngles: 'false' SpacesInParentheses: 'false' SpacesInSquareBrackets: 'false' Standard: Cpp11 UseTab: Never ... ================================================ FILE: src/cpp/core.cpp ================================================ #include "point_cloud_utilities.h" #include "geometrycentral/numerical/linear_algebra_utilities.h" #include "geometrycentral/surface/edge_length_geometry.h" #include "geometrycentral/surface/intrinsic_mollification.h" #include "geometrycentral/surface/manifold_surface_mesh.h" #include "geometrycentral/surface/meshio.h" #include "geometrycentral/surface/simple_polygon_mesh.h" #include "geometrycentral/surface/surface_mesh.h" #include "geometrycentral/surface/surface_mesh_factories.h" #include "geometrycentral/surface/tufted_laplacian.h" #include "geometrycentral/surface/vertex_position_geometry.h" #include #include #include #include "Eigen/Dense" namespace py = pybind11; using namespace geometrycentral; using namespace geometrycentral::surface; // For overloaded functions, with C++11 compiler only template using overload_cast_ = pybind11::detail::overload_cast_impl; // Parameters related to unused elements. Maybe expose these as parameters? 
double laplacianReplaceVal = 1.0; double massReplaceVal = -1e-3; std::tuple, SparseMatrix> buildMeshLaplacian(const DenseMatrix& vMat, const DenseMatrix& fMat, double mollifyFactor) { // First, load a simple polygon mesh SimplePolygonMesh simpleMesh; // Copy to std vector representation simpleMesh.vertexCoordinates.resize(vMat.rows()); for (size_t iP = 0; iP < simpleMesh.vertexCoordinates.size(); iP++) { simpleMesh.vertexCoordinates[iP] = Vector3{vMat(iP, 0), vMat(iP, 1), vMat(iP, 2)}; } simpleMesh.polygons.resize(fMat.rows()); for (size_t iF = 0; iF < simpleMesh.polygons.size(); iF++) { simpleMesh.polygons[iF] = std::vector{fMat(iF, 0), fMat(iF, 1), fMat(iF, 2)}; } // Remove any unused vertices std::vector oldToNewMap = simpleMesh.stripUnusedVertices(); // Build the rich mesh data structure std::unique_ptr mesh; std::unique_ptr geometry; std::tie(mesh, geometry) = makeSurfaceMeshAndGeometry(simpleMesh.polygons, simpleMesh.vertexCoordinates); // Do the hard work, calling the geometry-central function SparseMatrix L, M; std::tie(L, M) = buildTuftedLaplacian(*mesh, *geometry, mollifyFactor); // If necessary, re-index matrices to account for any unreferenced vertices which were skipped. 
// For any unreferenced verts, creates an identity row/col in the Laplacian and bool anyUnreferenced = false; for (const size_t& ind : oldToNewMap) { if (ind == INVALID_IND) anyUnreferenced = true; } if (anyUnreferenced) { // Invert the map std::vector newToOldMap(simpleMesh.nVertices()); for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] != INVALID_IND) { newToOldMap[oldToNewMap[iOld]] = iOld; } } size_t N = oldToNewMap.size(); { // Update the Laplacian std::vector> triplets; // Copy entries for (int k = 0; k < L.outerSize(); k++) { for (typename SparseMatrix::InnerIterator it(L, k); it; ++it) { double thisVal = it.value(); int i = it.row(); int j = it.col(); triplets.emplace_back(newToOldMap[i], newToOldMap[j], thisVal); } } // Add diagonal entries for unreferenced for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] == INVALID_IND) { triplets.emplace_back(iOld, iOld, laplacianReplaceVal); } } // Update the matrix L = SparseMatrix(N, N); L.setFromTriplets(triplets.begin(), triplets.end()); } { // Update the mass matrix std::vector> triplets; // Copy entries double smallestVal = std::numeric_limits::infinity(); for (int k = 0; k < M.outerSize(); k++) { for (typename SparseMatrix::InnerIterator it(M, k); it; ++it) { double thisVal = it.value(); int i = it.row(); int j = it.col(); triplets.emplace_back(newToOldMap[i], newToOldMap[j], thisVal); smallestVal = std::fmin(smallestVal, std::abs(thisVal)); } } // Add diagonal entries for unreferenced double newMassVal = massReplaceVal < 0 ? 
-massReplaceVal * smallestVal : massReplaceVal; for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] == INVALID_IND) { triplets.emplace_back(iOld, iOld, newMassVal); } } // Update the matrix M = SparseMatrix(N, N); M.setFromTriplets(triplets.begin(), triplets.end()); } } return std::make_tuple(L, M); } std::tuple, SparseMatrix> buildPointCloudLaplacian(const DenseMatrix& vMat, double mollifyFactor, size_t nNeigh) { SimplePolygonMesh cloudMesh; // Copy to std vector representation cloudMesh.vertexCoordinates.resize(vMat.rows()); for (size_t iP = 0; iP < cloudMesh.vertexCoordinates.size(); iP++) { cloudMesh.vertexCoordinates[iP] = Vector3{vMat(iP, 0), vMat(iP, 1), vMat(iP, 2)}; } // Generate the local triangulations for the point cloud Neighbors_t neigh = generate_knn(cloudMesh.vertexCoordinates, nNeigh); std::vector normals = generate_normals(cloudMesh.vertexCoordinates, neigh); std::vector> coords = generate_coords_projection(cloudMesh.vertexCoordinates, normals, neigh); LocalTriangulationResult localTri = build_delaunay_triangulations(coords, neigh); // Take the union of all triangles in all the neighborhoods for (size_t iPt = 0; iPt < cloudMesh.vertexCoordinates.size(); iPt++) { const std::vector& thisNeigh = neigh[iPt]; size_t nNeigh = thisNeigh.size(); // Accumulate over triangles for (const auto& tri : localTri.pointTriangles[iPt]) { std::array triGlobal = {iPt, thisNeigh[tri[1]], thisNeigh[tri[2]]}; cloudMesh.polygons.push_back({triGlobal[0], triGlobal[1], triGlobal[2]}); } } // strip unreferenced vertices (can we argue this should never happen? good regardless for robustness.) 
std::vector oldToNewMap = cloudMesh.stripUnusedVertices(); std::unique_ptr mesh; std::unique_ptr geometry; std::tie(mesh, geometry) = makeSurfaceMeshAndGeometry(cloudMesh.polygons, cloudMesh.vertexCoordinates); SparseMatrix L, M; std::tie(L, M) = buildTuftedLaplacian(*mesh, *geometry, mollifyFactor); L = L / 3.; M = M / 3.; // If necessary, re-index matrices to account for any unreferenced vertices which were skipped. // For any unreferenced verts, creates an identity row/col in the Laplacian and bool anyUnreferenced = false; for (const size_t& ind : oldToNewMap) { if (ind == INVALID_IND) anyUnreferenced = true; } if (anyUnreferenced) { // Invert the map std::vector newToOldMap(cloudMesh.nVertices()); for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] != INVALID_IND) { newToOldMap[oldToNewMap[iOld]] = iOld; } } size_t N = oldToNewMap.size(); { // Update the Laplacian std::vector> triplets; // Copy entries for (int k = 0; k < L.outerSize(); k++) { for (typename SparseMatrix::InnerIterator it(L, k); it; ++it) { double thisVal = it.value(); int i = it.row(); int j = it.col(); triplets.emplace_back(newToOldMap[i], newToOldMap[j], thisVal); } } // Add diagonal entries for unreferenced for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] == INVALID_IND) { triplets.emplace_back(iOld, iOld, laplacianReplaceVal); } } // Update the matrix L = SparseMatrix(N, N); L.setFromTriplets(triplets.begin(), triplets.end()); } { // Update the mass matrix std::vector> triplets; // Copy entries double smallestVal = std::numeric_limits::infinity(); for (int k = 0; k < M.outerSize(); k++) { for (typename SparseMatrix::InnerIterator it(M, k); it; ++it) { double thisVal = it.value(); int i = it.row(); int j = it.col(); triplets.emplace_back(newToOldMap[i], newToOldMap[j], thisVal); smallestVal = std::fmin(smallestVal, std::abs(thisVal)); } } // Add diagonal entries for unreferenced double newMassVal = massReplaceVal < 0 ? 
-massReplaceVal * smallestVal : massReplaceVal; for (size_t iOld = 0; iOld < oldToNewMap.size(); iOld++) { if (oldToNewMap[iOld] == INVALID_IND) { triplets.emplace_back(iOld, iOld, newMassVal); } } // Update the matrix M = SparseMatrix(N, N); M.setFromTriplets(triplets.begin(), triplets.end()); } } return std::make_tuple(L, M); } // Actual binding code // clang-format off PYBIND11_MODULE(robust_laplacian_bindings, m) { m.doc() = "Robust laplacian low-level bindings"; m.def("buildMeshLaplacian", &buildMeshLaplacian, "build the mesh Laplacian", py::arg("vMat"), py::arg("fMat"), py::arg("mollifyFactor")); m.def("buildPointCloudLaplacian", &buildPointCloudLaplacian, "build the point cloud Laplacian", py::arg("vMat"), py::arg("mollifyFactor"), py::arg("nNeigh")); } // clang-format on ================================================ FILE: src/cpp/point_cloud_utilities.cpp ================================================ #include "point_cloud_utilities.h" #include "geometrycentral/utilities/elementary_geometry.h" #include "geometrycentral/utilities/knn.h" #include "Eigen/Dense" #include #include std::vector> generate_knn(const std::vector& points, size_t k) { geometrycentral::NearestNeighborFinder finder(points); std::vector> result; for (size_t i = 0; i < points.size(); i++) { result.emplace_back(finder.kNearestNeighbors(i, k)); } return result; } std::vector generate_normals(const std::vector& points, const Neighbors_t& neigh) { std::vector normals(points.size()); for (size_t iPt = 0; iPt < points.size(); iPt++) { size_t nNeigh = neigh[iPt].size(); // Compute centroid Vector3 center{0., 0., 0.}; for (size_t iN = 0; iN < nNeigh; iN++) { center += points[neigh[iPt][iN]]; } center /= nNeigh + 1; // Assemble matrix os vectors from centroid Eigen::MatrixXd localMat(3, neigh[iPt].size()); for (size_t iN = 0; iN < nNeigh; iN++) { Vector3 neighPos = points[neigh[iPt][iN]] - center; localMat(0, iN) = neighPos.x; localMat(1, iN) = neighPos.y; localMat(2, iN) = neighPos.z; } // 
Smallest singular vector is best normal Eigen::JacobiSVD svd(localMat, Eigen::ComputeThinU); Eigen::Vector3d bestNormal = svd.matrixU().col(2); Vector3 N{bestNormal(0), bestNormal(1), bestNormal(2)}; N = unit(N); normals[iPt] = N; } return normals; } std::vector> generate_coords_projection(const std::vector& points, const std::vector normals, const Neighbors_t& neigh) { std::vector> coords(points.size()); for (size_t iPt = 0; iPt < points.size(); iPt++) { size_t nNeigh = neigh[iPt].size(); coords[iPt].resize(nNeigh); Vector3 center = points[iPt]; Vector3 normal = normals[iPt]; // build an arbitrary tangent basis Vector3 basisX, basisY; auto r = normal.buildTangentBasis(); basisX = r[0]; basisY = r[1]; for (size_t iN = 0; iN < nNeigh; iN++) { Vector3 vec = points[neigh[iPt][iN]] - center; vec = vec.removeComponent(normal); Vector2 coord{dot(basisX, vec), dot(basisY, vec)}; coords[iPt][iN] = coord; } } return coords; } // For each planar-projected neighborhood, generate the triangles in the Delaunay triangulation which are incident on // the center vertex. // // This could be done robustly via e.g. Shewchuk's triangle.c. However, instead we use a simple self-contained strategy // which leverages the needs of this particular situation. In particular, we don't really care about getting exactly the // Delaunay triangulation; we're just looking for any sane triangulation to use as input the the subsequent step. We // just use Delaunay because we like the property that (in the limit of sampling), it's a triple-cover of the domain; // with other strategies it's hard to quantify how many times our triangles cover the domain. This makes the problem // easier, because for degenerate/underdetermined cases, we're happy to output any triangulation, even if it's not the // Delaunay triangulation in exact arithmetic. 
// // This strategy works by angularly sorting points relative to the neighborhood center, then walking around circle // identifying pairs of edges which form Delaunay triangles (more details inline). In particular, using a sorting of the // points helps to distinguish indeterminate cases and always output some triangles. Additionally, a few heuristics are // included for handling of degenerate and collinear points. This routine has O(n*k^2) complexity, where k is the // neighborhood size). LocalTriangulationResult build_delaunay_triangulations(const std::vector>& coords, const Neighbors_t& neigh) { // A few innocent numerical parameters const double PERTURB_THRESH = 1e-7; // in units of relative length const double ANGLE_COLLINEAR_THRESH = 1e-5; // in units of radians const double OUTSIDE_EPS = 1e-4; // in units of relative length // NOTE: This is not robust if the entire neighbohood is coincident (or very nearly coincident) with the centerpoint. // Though in that case, the generate_normals() routine will probably also have issues. size_t nPts = coords.size(); LocalTriangulationResult result; result.pointTriangles.resize(nPts); for (size_t iPt = 0; iPt < nPts; iPt++) { size_t nNeigh = neigh[iPt].size(); double lenScale = norm(coords[iPt].back()); // Something is hopelessly degenerate, don't even bother trying. No triangles for this point. if (!std::isfinite(lenScale) || lenScale <= 0) { continue; } // Local copies of points std::vector perturbPoints = coords[iPt]; std::vector perturbInds = neigh[iPt]; { // Perturb points which are extremely close to the source for (size_t iNeigh = 0; iNeigh < nNeigh; iNeigh++) { Vector2& neighPt = perturbPoints[iNeigh]; double dist = norm(neighPt); if (dist < lenScale * PERTURB_THRESH) { // need to perturb Vector2 dir = normalize(neighPt); if (!isfinite(dir)) { // even direction is degenerate :( // pick a direction from index double thetaDir = (2. 
* M_PI * iNeigh) / nNeigh; dir = Vector2::fromAngle(thetaDir); } // Set the distance from the origin for the pertubed point. Including the index avoids creating many // co-circular points; no need to stress the Delaunay triangulation unnessecarily. double len = (1. + static_cast(iNeigh) / nNeigh) * lenScale * PERTURB_THRESH * 10; neighPt = len * dir; // update the point } } } size_t closestPointInd = 0; double closestPointDist = std::numeric_limits::infinity(); bool hasBoundary = false; { // Find the starting point for the angular search. // If there is boundary, it's the beginning of the interior region; otherwise its the closest point. // (either way, this point is guaranteed to appear in the triangulation) // NOTE: boundary check is actually done after inline sort below, since its cheaper there for (size_t iNeigh = 0; iNeigh < nNeigh; iNeigh++) { Vector2 neighPt = perturbPoints[iNeigh]; double thisPointDist = norm(neighPt); if (thisPointDist < closestPointDist) { closestPointDist = thisPointDist; closestPointInd = iNeigh; } } } std::vector sortInds(nNeigh); { // = Angularly sort the points CCW, such that the closest point comes first // Angular sort std::vector pointAngles(nNeigh); for (size_t i = 0; i < nNeigh; i++) { pointAngles[i] = arg(perturbPoints[i]); } std::iota(std::begin(sortInds), std::end(sortInds), 0); std::sort(sortInds.begin(), sortInds.end(), [&](const size_t& a, const size_t& b) -> bool { return pointAngles[a] < pointAngles[b]; }); // Check if theres a gap of >= PI between any two consecutive points. If so it's a boundary. 
double largestGap = -1; size_t largestGapEndInd = 0; for (size_t i = 0; i < nNeigh; i++) { size_t j = (i + 1) % nNeigh; double angleI = pointAngles[sortInds[i]]; double angleJ = pointAngles[sortInds[j]]; double gap; if (i + 1 == nNeigh) { gap = angleJ - (angleI + 2 * M_PI); } else { gap = angleJ - angleI; } if (gap > largestGap) { largestGap = gap; largestGapEndInd = j; } } // The start of the cyclic ordering is either size_t firstInd; if (largestGap > (M_PI - ANGLE_COLLINEAR_THRESH)) { firstInd = largestGapEndInd; hasBoundary = true; } else { firstInd = std::distance(sortInds.begin(), std::find(sortInds.begin(), sortInds.end(), closestPointInd)); hasBoundary = false; } // Cyclically permute to ensure starting point comes first std::rotate(sortInds.begin(), sortInds.begin() + firstInd, sortInds.end()); } size_t edgeStartInd = 0; std::vector>& thisPointTriangles = result.pointTriangles[iPt]; // accumulate result // end point should wrap around the check the first point only if there is no boundary size_t searchEnd = nNeigh + (hasBoundary ? 0 : 1); // Walk around the angularly-sorted points, forming triangles spanning angular regions. To construct each triangle, // we start with leg at edgeStartInd, then search over edgeEndInd to find the first other end which has an empty // circumcircle. Once it is found, we form a triangle and being searching again from edgeEndInd. // // At first, this might sound like it has n^3 complexity, since there are n^2 triangles to consider, and testing // each costs n. However, since we march around the angular direction in increasing order, we will only test at most // O(n) triangles, leading to n^2 complexity. 
while (edgeStartInd < nNeigh) { size_t iStart = sortInds[edgeStartInd]; Vector2 startPos = perturbPoints[iStart]; // lookahead and find the first triangle we can form with an empty (or nearly empty) circumcircle bool foundTri = false; for (size_t edgeEndInd = edgeStartInd + 1; edgeEndInd < searchEnd; edgeEndInd++) { size_t iEnd = sortInds[edgeEndInd % nNeigh]; Vector2 endPos = perturbPoints[iEnd]; // If the start and end points are too close to being colinear, don't bother Vector2 startPosDir = unit(startPos); Vector2 endPosDir = unit(endPos); if (std::fabs(cross(startPosDir, endPosDir)) < ANGLE_COLLINEAR_THRESH) { continue; } // Find the circumcenter and circumradius geometrycentral::RayRayIntersectionResult2D isect = rayRayIntersection(0.5 * startPos, startPosDir.rotate90(), 0.5 * endPos, -endPosDir.rotate90()); Vector2 circumcenter = 0.5 * startPos + isect.tRay1 * startPosDir.rotate90(); double circumradius = norm(circumcenter); // Find the minimum distance to the circumcenter double nearestDistSq = std::numeric_limits::infinity(); double circumradSqConservative = (circumradius - lenScale * OUTSIDE_EPS); circumradSqConservative *= circumradSqConservative; for (size_t iTest = 0; iTest < nNeigh; iTest++) { if (iTest == iStart || iTest == iEnd) continue; // skip the points forming the triangle double thisDistSq = norm2(circumcenter - perturbPoints[iTest]); nearestDistSq = std::fmin(nearestDistSq, thisDistSq); // if it's already strictly inside, no need to keep searching if (nearestDistSq < circumradSqConservative) break; } double nearestDist = std::sqrt(nearestDistSq); // Accept the triangle if its circumcircle is sufficiently empty // NOTE: The choice of signs in this expression is important: we preferential DO accept triangles whos // circumcircle is barely empty. This makes sense here because our circular loop already avoids any risk of // accepting overlapping triangles; the risk is in not accepting any, so we should preferrentially accept. 
if (nearestDist + lenScale * OUTSIDE_EPS > circumradius) { std::array triInds = {std::numeric_limits::max(), iStart, iEnd}; thisPointTriangles.push_back(triInds); // advance the circular search to find a triangle starting at this edge edgeStartInd = edgeEndInd; foundTri = true; break; } } // if we couldn't find any triangles, increment the start index if (!foundTri) { edgeStartInd++; } } } return result; } ================================================ FILE: src/cpp/point_cloud_utilities.h ================================================ #pragma once #include "geometrycentral/utilities/vector2.h" #include "geometrycentral/utilities/vector3.h" #include "geometrycentral/numerical/linear_algebra_utilities.h" using geometrycentral::SparseMatrix; using geometrycentral::Vector; using geometrycentral::Vector2; using geometrycentral::Vector3; // === Basic utility methods using Neighbors_t = std::vector>; // Generate the k-nearest-neighbors for the points. // The list will _not_ include the center point. 
Neighbors_t generate_knn(const std::vector& points, size_t k); // Estimate normals from a neighborhood (arbitrarily oriented) std::vector generate_normals(const std::vector& points, const Neighbors_t& neigh); // Project a neighborhood to 2D tangent plane // The output is in correspondence with `neigh`, with the center point implicitly at (0,0) std::vector> generate_coords_projection(const std::vector& points, const std::vector normals, const Neighbors_t& neigh); struct LocalTriangulationResult { // all triangle indices are in to the neighbors list std::vector>> pointTriangles; // the triangles which touch the center vertex, always // numbered such that the center vertex comes first }; LocalTriangulationResult build_delaunay_triangulations(const std::vector>& coords, const Neighbors_t& neigh); ================================================ FILE: src/robust_laplacian/__init__.py ================================================ from robust_laplacian.core import * ================================================ FILE: src/robust_laplacian/core.py ================================================ import numpy as np import robust_laplacian_bindings as rlb def mesh_laplacian(verts, faces, mollify_factor=1e-5): ## Validate input if type(verts) is not np.ndarray: raise ValueError("`verts` should be a numpy array") if (len(verts.shape) != 2) or (verts.shape[1] != 3): raise ValueError("`verts` should have shape (V,3), shape is " + str(verts.shape)) if type(faces) is not np.ndarray: raise ValueError("`faces` should be a numpy array") if (len(faces.shape) != 2) or (faces.shape[1] != 3): raise ValueError("`faces` should have shape (F,3), shape is " + str(faces.shape)) ## Call the main algorithm from the bindings L, M = rlb.buildMeshLaplacian(verts, faces, mollify_factor) ## Return the result return L, M def point_cloud_laplacian(points, mollify_factor=1e-5, n_neighbors=30): ## Validate input if type(points) is not np.ndarray: raise ValueError("`points` should be a numpy array") 
if (len(points.shape) != 2) or (points.shape[1] != 3): raise ValueError("`points` should have shape (V,3), shape is " + str(points.shape)) ## Call the main algorithm from the bindings L, M = rlb.buildPointCloudLaplacian(points, mollify_factor, n_neighbors) ## Return the result return L, M ================================================ FILE: test/robust_laplacian_test.py ================================================ import unittest import os import sys import os.path as path import numpy as np import scipy # Path to where the bindings live sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))) if os.name == 'nt': # if Windows # handle default location where VS puts binary sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build", "Debug"))) else: # normal / unix case sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build"))) import robust_laplacian as rl def generate_verts(n_pts=999): np.random.seed(777) return np.random.rand(n_pts, 3) def generate_faces(n_pts=999): # n_pts should be a multiple of 3 for indexing to work out np.random.seed(777) rand_faces = np.random.randint(0, n_pts, size=(2*n_pts,3)) coverage_faces = np.arange(n_pts).reshape(-1, 3) faces = np.vstack((rand_faces, coverage_faces)) return faces def is_symmetric(A, eps=1e-6): resid = A - A.T return np.all(np.abs(resid.data) < eps) def is_nonnegative(A, eps=1e-6): return np.all(A.data > -eps) class TestCore(unittest.TestCase): def test_mesh_laplacian(self): V = generate_verts() F = generate_faces() L, M = rl.mesh_laplacian(V, F) # Validate mass matrix self.assertTrue(is_nonnegative(M)) self.assertTrue(is_symmetric(M)) self.assertEqual(M.sum(), M.diagonal().sum()) # Validate Laplacian self.assertTrue(is_symmetric(L)) off_L = scipy.sparse.diags(L.diagonal()) - L self.assertTrue(is_nonnegative(off_L)) # positive edge weights self.assertGreater(L.sum(), -1e-5) # Trigger validation errors # rl.mesh_laplacian("cat", 
F) # rl.mesh_laplacian(V, "cat") # rl.mesh_laplacian(V.flatten(), F) # rl.mesh_laplacian(V, F.flatten()) def test_mesh_laplacian_unused_verts(self): V = generate_verts() F = generate_faces() # add an unused vertex at the beginning and end V = np.vstack(( np.array([[0., 0., 0.,]]), V, np.array([[0., 0., 0.,]]) )) F = F + 1 L, M = rl.mesh_laplacian(V, F) # Validate mass matrix self.assertTrue(is_nonnegative(M)) self.assertTrue(is_symmetric(M)) self.assertEqual(M.sum(), M.diagonal().sum()) # Validate Laplacian self.assertTrue(is_symmetric(L)) off_L = scipy.sparse.diags(L.diagonal()) - L self.assertTrue(is_nonnegative(off_L)) # positive edge weights self.assertGreater(L.sum(), -1e-5) def test_point_cloud_laplacian(self): V = generate_verts() L, M = rl.point_cloud_laplacian(V) # Validate mass matrix self.assertTrue(is_nonnegative(M)) self.assertTrue(is_symmetric(M)) self.assertEqual(M.sum(), M.diagonal().sum()) # Validate Laplacian self.assertTrue(is_symmetric(L)) off_L = scipy.sparse.diags(L.diagonal()) - L self.assertTrue(is_nonnegative(off_L)) # positive edge weights self.assertGreater(L.sum(), -1e-5) # Trigger validation errors # rl.point_cloud_laplacian("cat") # rl.point_cloud_laplacian(V.flatten()) if __name__ == '__main__': unittest.main() ================================================ FILE: test/sample.py ================================================ import os, sys import polyscope as ps import numpy as np import scipy.sparse.linalg as sla from plyfile import PlyData # Path to where the bindings live sys.path.append(os.path.join(os.path.dirname(__file__), "../build/")) sys.path.append(os.path.join(os.path.dirname(__file__), "../src/")) import robust_laplacian # Read input plydata = PlyData.read("/path/to/cloud.ply") points = np.vstack(( plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z'] )).T # for meshes # tri_data = plydata['face'].data['vertex_indices'] # faces = np.vstack(tri_data) # Build Laplacian L, M = 
robust_laplacian.point_cloud_laplacian(points, mollify_factor=1e-5) # for meshes # L, M = robust_laplacian.mesh_laplacian(points, faces, mollify_factor=1e-5) # Compute some eigenvectors n_eig = 10 evals, evecs = sla.eigsh(L, n_eig, M, sigma=1e-8) # Visualize ps.init() ps_cloud = ps.register_point_cloud("my cloud", points) for i in range(n_eig): ps_cloud.add_scalar_quantity("eigenvector_"+str(i), evecs[:,i], enabled=True) # for meshes # ps_surf = ps.register_surface_mesh("my surf", points, faces) # for i in range(n_eig): # ps_surf.add_scalar_quantity("eigenvector_"+str(i), evecs[:,i], enabled=True) ps.show()