Repository: openxrlab/xrnerf Branch: main Commit: f8020561b91b Files: 2000 Total size: 15.8 MB Directory structure: gitextract_oeikmd9e/ ├── .github/ │ └── workflows/ │ ├── build.yml │ └── lint.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── README_CN.md ├── configs/ │ ├── __init__.py │ ├── _base_/ │ │ └── models/ │ │ └── nerf.py │ ├── animatable_nerf/ │ │ ├── an_h36m_s11_novel_pose.py │ │ ├── an_h36m_s11_train_pose.py │ │ ├── an_h36m_s1_novel_pose.py │ │ ├── an_h36m_s1_train_pose.py │ │ ├── an_h36m_s5_novel_pose.py │ │ ├── an_h36m_s5_train_pose.py │ │ ├── an_h36m_s6_novel_pose.py │ │ ├── an_h36m_s6_train_pose.py │ │ ├── an_h36m_s7_novel_pose.py │ │ ├── an_h36m_s7_train_pose.py │ │ ├── an_h36m_s8_novel_pose.py │ │ ├── an_h36m_s8_train_pose.py │ │ ├── an_h36m_s9_novel_pose.py │ │ ├── an_h36m_s9_render_train_pose.py │ │ ├── an_h36m_s9_train_pose.py │ │ ├── an_zjumocap_313_novel_pose.py │ │ └── an_zjumocap_313_train_pose.py │ ├── bungeenerf/ │ │ └── bungeenerf_multiscale_google.py │ ├── gnr/ │ │ └── gnr_genebody.py │ ├── instant_ngp/ │ │ └── nerf_blender_local01.py │ ├── kilonerf/ │ │ ├── kilonerf_distill_BlendedMVS_base01.py │ │ ├── kilonerf_distill_Synthetic_NeRF_base01.py │ │ ├── kilonerf_finetune_BlendedMVS_base01.py │ │ ├── kilonerf_finetune_Synthetic_NeRF_base01.py │ │ ├── kilonerf_pretrain_BlendedMVS_base01.py │ │ └── kilonerf_pretrain_Synthetic_NeRF_base01.py │ ├── mipnerf/ │ │ ├── mipnerf_blender.py │ │ └── mipnerf_multiscale.py │ ├── nerf/ │ │ ├── nerf_blender_base01.py │ │ └── nerf_llff_base01.py │ └── neuralbody/ │ ├── nb_zjumocap_313.py │ ├── nb_zjumocap_315.py │ ├── nb_zjumocap_377.py │ ├── nb_zjumocap_386.py │ ├── nb_zjumocap_387.py │ ├── nb_zjumocap_390.py │ ├── nb_zjumocap_392.py │ ├── nb_zjumocap_393.py │ ├── nb_zjumocap_394.py │ └── nb_zjumocap_render_313.py ├── docker/ │ ├── Dockerfile │ ├── DockerfileCN │ ├── daemon.json │ └── sources.list ├── docs/ │ ├── en/ │ │ ├── CONTRIBUTING.md │ │ ├── additional_licenses.md │ │ 
├── apis.md │ │ ├── benchmark.md │ │ ├── dataset_preparation.md │ │ ├── faq.md │ │ ├── get_started.md │ │ ├── installation.md │ │ └── tutorials/ │ │ ├── config.md │ │ ├── data_pipeline.md │ │ └── model.md │ └── zh_cn/ │ ├── apis.md │ ├── dataset_preparation.md │ ├── get_started.md │ ├── installation.md │ └── tutorials/ │ ├── config.md │ ├── data_pipeline.md │ └── model.md ├── extensions/ │ ├── mesh_grid/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── matrix.h │ │ ├── mesh_grid.cpp │ │ ├── mesh_grid_kernel.cu │ │ ├── mesh_grid_searcher.py │ │ ├── render.cpp │ │ ├── render.cu │ │ ├── render.h │ │ ├── setup.py │ │ ├── surface_inside.cpp │ │ └── test_mesh_grid.py │ └── ngp_raymarch/ │ ├── README.md │ ├── include/ │ │ ├── op_include/ │ │ │ ├── eigen/ │ │ │ │ ├── .gitignore │ │ │ │ ├── .gitlab/ │ │ │ │ │ ├── issue_templates/ │ │ │ │ │ │ ├── Bug Report.md │ │ │ │ │ │ └── Feature Request.md │ │ │ │ │ └── merge_request_templates/ │ │ │ │ │ └── Merge Request Template.md │ │ │ │ ├── .gitlab-ci.yml │ │ │ │ ├── .hgeol │ │ │ │ ├── COPYING.APACHE │ │ │ │ ├── COPYING.BSD │ │ │ │ ├── COPYING.GPL │ │ │ │ ├── COPYING.LGPL │ │ │ │ ├── COPYING.MINPACK │ │ │ │ ├── COPYING.MPL2 │ │ │ │ ├── COPYING.README │ │ │ │ ├── CTestConfig.cmake │ │ │ │ ├── CTestCustom.cmake.in │ │ │ │ ├── Eigen/ │ │ │ │ │ ├── Cholesky │ │ │ │ │ ├── CholmodSupport │ │ │ │ │ ├── Core │ │ │ │ │ ├── Dense │ │ │ │ │ ├── Eigen │ │ │ │ │ ├── Eigenvalues │ │ │ │ │ ├── Geometry │ │ │ │ │ ├── Householder │ │ │ │ │ ├── IterativeLinearSolvers │ │ │ │ │ ├── Jacobi │ │ │ │ │ ├── KLUSupport │ │ │ │ │ ├── LU │ │ │ │ │ ├── MetisSupport │ │ │ │ │ ├── OrderingMethods │ │ │ │ │ ├── PaStiXSupport │ │ │ │ │ ├── PardisoSupport │ │ │ │ │ ├── QR │ │ │ │ │ ├── QtAlignedMalloc │ │ │ │ │ ├── SPQRSupport │ │ │ │ │ ├── SVD │ │ │ │ │ ├── Sparse │ │ │ │ │ ├── SparseCholesky │ │ │ │ │ ├── SparseCore │ │ │ │ │ ├── SparseLU │ │ │ │ │ ├── SparseQR │ │ │ │ │ ├── StdDeque │ │ │ │ │ ├── StdList │ │ │ │ │ ├── StdVector │ │ │ │ │ ├── SuperLUSupport │ │ │ │ 
│ ├── UmfPackSupport │ │ │ │ │ └── src/ │ │ │ │ │ ├── Cholesky/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── LDLT.h │ │ │ │ │ │ ├── LLT.h │ │ │ │ │ │ └── LLT_LAPACKE.h │ │ │ │ │ ├── CholmodSupport/ │ │ │ │ │ │ ├── CholmodSupport.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── Core/ │ │ │ │ │ │ ├── ArithmeticSequence.h │ │ │ │ │ │ ├── Array.h │ │ │ │ │ │ ├── ArrayBase.h │ │ │ │ │ │ ├── ArrayWrapper.h │ │ │ │ │ │ ├── Assign.h │ │ │ │ │ │ ├── AssignEvaluator.h │ │ │ │ │ │ ├── Assign_MKL.h │ │ │ │ │ │ ├── BandMatrix.h │ │ │ │ │ │ ├── Block.h │ │ │ │ │ │ ├── BooleanRedux.h │ │ │ │ │ │ ├── CommaInitializer.h │ │ │ │ │ │ ├── ConditionEstimator.h │ │ │ │ │ │ ├── CoreEvaluators.h │ │ │ │ │ │ ├── CoreIterators.h │ │ │ │ │ │ ├── CwiseBinaryOp.h │ │ │ │ │ │ ├── CwiseNullaryOp.h │ │ │ │ │ │ ├── CwiseTernaryOp.h │ │ │ │ │ │ ├── CwiseUnaryOp.h │ │ │ │ │ │ ├── CwiseUnaryView.h │ │ │ │ │ │ ├── DenseBase.h │ │ │ │ │ │ ├── DenseCoeffsBase.h │ │ │ │ │ │ ├── DenseStorage.h │ │ │ │ │ │ ├── Diagonal.h │ │ │ │ │ │ ├── DiagonalMatrix.h │ │ │ │ │ │ ├── DiagonalProduct.h │ │ │ │ │ │ ├── Dot.h │ │ │ │ │ │ ├── EigenBase.h │ │ │ │ │ │ ├── ForceAlignedAccess.h │ │ │ │ │ │ ├── Fuzzy.h │ │ │ │ │ │ ├── GeneralProduct.h │ │ │ │ │ │ ├── GenericPacketMath.h │ │ │ │ │ │ ├── GlobalFunctions.h │ │ │ │ │ │ ├── IO.h │ │ │ │ │ │ ├── IndexedView.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── Inverse.h │ │ │ │ │ │ ├── Map.h │ │ │ │ │ │ ├── MapBase.h │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ ├── MathFunctionsImpl.h │ │ │ │ │ │ ├── Matrix.h │ │ │ │ │ │ ├── MatrixBase.h │ │ │ │ │ │ ├── NestByValue.h │ │ │ │ │ │ ├── NoAlias.h │ │ │ │ │ │ ├── NumTraits.h │ │ │ │ │ │ ├── PartialReduxEvaluator.h │ │ │ │ │ │ ├── PermutationMatrix.h │ │ │ │ │ │ ├── PlainObjectBase.h │ │ │ │ │ │ ├── Product.h │ │ │ │ │ │ ├── ProductEvaluators.h │ │ │ │ │ │ ├── Random.h │ │ │ │ │ │ ├── Redux.h │ │ │ │ │ │ ├── Ref.h │ │ │ │ │ │ ├── Replicate.h │ │ │ │ │ │ ├── Reshaped.h │ │ │ │ │ │ ├── ReturnByValue.h │ │ │ │ │ 
│ ├── Reverse.h │ │ │ │ │ │ ├── Select.h │ │ │ │ │ │ ├── SelfAdjointView.h │ │ │ │ │ │ ├── SelfCwiseBinaryOp.h │ │ │ │ │ │ ├── Solve.h │ │ │ │ │ │ ├── SolveTriangular.h │ │ │ │ │ │ ├── SolverBase.h │ │ │ │ │ │ ├── StableNorm.h │ │ │ │ │ │ ├── StlIterators.h │ │ │ │ │ │ ├── Stride.h │ │ │ │ │ │ ├── Swap.h │ │ │ │ │ │ ├── Transpose.h │ │ │ │ │ │ ├── Transpositions.h │ │ │ │ │ │ ├── TriangularMatrix.h │ │ │ │ │ │ ├── VectorBlock.h │ │ │ │ │ │ ├── VectorwiseOp.h │ │ │ │ │ │ ├── Visitor.h │ │ │ │ │ │ ├── arch/ │ │ │ │ │ │ │ ├── AVX/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── AVX512/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── AltiVec/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── MatrixProduct.h │ │ │ │ │ │ │ │ ├── MatrixProductCommon.h │ │ │ │ │ │ │ │ ├── MatrixProductMMA.h │ │ │ │ │ │ │ │ └── PacketMath.h │ │ │ │ │ │ │ ├── Default/ │ │ │ │ │ │ │ │ ├── BFloat16.h │ │ │ │ │ │ │ │ ├── ConjHelper.h │ │ │ │ │ │ │ │ ├── GenericPacketMathFunctions.h │ │ │ │ │ │ │ │ ├── GenericPacketMathFunctionsFwd.h │ │ │ │ │ │ │ │ ├── Half.h │ │ │ │ │ │ │ │ ├── Settings.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── GPU/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ ├── Tuple.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── HIP/ │ │ │ │ │ │ │ │ └── hcc/ │ │ │ │ │ │ │ │ └── math_constants.h │ │ │ │ │ │ │ ├── MSA/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ └── PacketMath.h │ │ │ │ │ │ │ ├── NEON/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── GeneralBlockPanelKernel.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ ├── TypeCasting.h │ │ │ │ │ │ │ │ └── UnaryFunctors.h │ │ │ │ │ │ │ 
├── SSE/ │ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── SVE/ │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ ├── SYCL/ │ │ │ │ │ │ │ │ ├── InteropHeaders.h │ │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ │ ├── PacketMath.h │ │ │ │ │ │ │ │ ├── SyclMemoryModel.h │ │ │ │ │ │ │ │ └── TypeCasting.h │ │ │ │ │ │ │ └── ZVector/ │ │ │ │ │ │ │ ├── Complex.h │ │ │ │ │ │ │ ├── MathFunctions.h │ │ │ │ │ │ │ └── PacketMath.h │ │ │ │ │ │ ├── functors/ │ │ │ │ │ │ │ ├── AssignmentFunctors.h │ │ │ │ │ │ │ ├── BinaryFunctors.h │ │ │ │ │ │ │ ├── NullaryFunctors.h │ │ │ │ │ │ │ ├── StlFunctors.h │ │ │ │ │ │ │ ├── TernaryFunctors.h │ │ │ │ │ │ │ └── UnaryFunctors.h │ │ │ │ │ │ ├── products/ │ │ │ │ │ │ │ ├── GeneralBlockPanelKernel.h │ │ │ │ │ │ │ ├── GeneralMatrixMatrix.h │ │ │ │ │ │ │ ├── GeneralMatrixMatrixTriangular.h │ │ │ │ │ │ │ ├── GeneralMatrixMatrixTriangular_BLAS.h │ │ │ │ │ │ │ ├── GeneralMatrixMatrix_BLAS.h │ │ │ │ │ │ │ ├── GeneralMatrixVector.h │ │ │ │ │ │ │ ├── GeneralMatrixVector_BLAS.h │ │ │ │ │ │ │ ├── Parallelizer.h │ │ │ │ │ │ │ ├── SelfadjointMatrixMatrix.h │ │ │ │ │ │ │ ├── SelfadjointMatrixMatrix_BLAS.h │ │ │ │ │ │ │ ├── SelfadjointMatrixVector.h │ │ │ │ │ │ │ ├── SelfadjointMatrixVector_BLAS.h │ │ │ │ │ │ │ ├── SelfadjointProduct.h │ │ │ │ │ │ │ ├── SelfadjointRank2Update.h │ │ │ │ │ │ │ ├── TriangularMatrixMatrix.h │ │ │ │ │ │ │ ├── TriangularMatrixMatrix_BLAS.h │ │ │ │ │ │ │ ├── TriangularMatrixVector.h │ │ │ │ │ │ │ ├── TriangularMatrixVector_BLAS.h │ │ │ │ │ │ │ ├── TriangularSolverMatrix.h │ │ │ │ │ │ │ ├── TriangularSolverMatrix_BLAS.h │ │ │ │ │ │ │ └── TriangularSolverVector.h │ │ │ │ │ │ └── util/ │ │ │ │ │ │ ├── BlasUtil.h │ │ │ │ │ │ ├── ConfigureVectorization.h │ │ │ │ │ │ ├── Constants.h │ │ │ │ │ │ ├── DisableStupidWarnings.h │ │ │ │ │ │ ├── ForwardDeclarations.h │ │ │ │ │ │ 
├── IndexedViewHelper.h │ │ │ │ │ │ ├── IntegralConstant.h │ │ │ │ │ │ ├── MKL_support.h │ │ │ │ │ │ ├── Macros.h │ │ │ │ │ │ ├── Memory.h │ │ │ │ │ │ ├── Meta.h │ │ │ │ │ │ ├── NonMPL2.h │ │ │ │ │ │ ├── ReenableStupidWarnings.h │ │ │ │ │ │ ├── ReshapedHelper.h │ │ │ │ │ │ ├── Serializer.h │ │ │ │ │ │ ├── StaticAssert.h │ │ │ │ │ │ ├── SymbolicIndex.h │ │ │ │ │ │ └── XprHelper.h │ │ │ │ │ ├── Eigenvalues/ │ │ │ │ │ │ ├── ComplexEigenSolver.h │ │ │ │ │ │ ├── ComplexSchur.h │ │ │ │ │ │ ├── ComplexSchur_LAPACKE.h │ │ │ │ │ │ ├── EigenSolver.h │ │ │ │ │ │ ├── GeneralizedEigenSolver.h │ │ │ │ │ │ ├── GeneralizedSelfAdjointEigenSolver.h │ │ │ │ │ │ ├── HessenbergDecomposition.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── MatrixBaseEigenvalues.h │ │ │ │ │ │ ├── RealQZ.h │ │ │ │ │ │ ├── RealSchur.h │ │ │ │ │ │ ├── RealSchur_LAPACKE.h │ │ │ │ │ │ ├── SelfAdjointEigenSolver.h │ │ │ │ │ │ ├── SelfAdjointEigenSolver_LAPACKE.h │ │ │ │ │ │ └── Tridiagonalization.h │ │ │ │ │ ├── Geometry/ │ │ │ │ │ │ ├── AlignedBox.h │ │ │ │ │ │ ├── AngleAxis.h │ │ │ │ │ │ ├── EulerAngles.h │ │ │ │ │ │ ├── Homogeneous.h │ │ │ │ │ │ ├── Hyperplane.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── OrthoMethods.h │ │ │ │ │ │ ├── ParametrizedLine.h │ │ │ │ │ │ ├── Quaternion.h │ │ │ │ │ │ ├── Rotation2D.h │ │ │ │ │ │ ├── RotationBase.h │ │ │ │ │ │ ├── Scaling.h │ │ │ │ │ │ ├── Transform.h │ │ │ │ │ │ ├── Translation.h │ │ │ │ │ │ ├── Umeyama.h │ │ │ │ │ │ └── arch/ │ │ │ │ │ │ └── Geometry_SIMD.h │ │ │ │ │ ├── Householder/ │ │ │ │ │ │ ├── BlockHouseholder.h │ │ │ │ │ │ ├── Householder.h │ │ │ │ │ │ ├── HouseholderSequence.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── IterativeLinearSolvers/ │ │ │ │ │ │ ├── BasicPreconditioners.h │ │ │ │ │ │ ├── BiCGSTAB.h │ │ │ │ │ │ ├── ConjugateGradient.h │ │ │ │ │ │ ├── IncompleteCholesky.h │ │ │ │ │ │ ├── IncompleteLUT.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── IterativeSolverBase.h │ │ │ │ │ │ ├── 
LeastSquareConjugateGradient.h │ │ │ │ │ │ └── SolveWithGuess.h │ │ │ │ │ ├── Jacobi/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── Jacobi.h │ │ │ │ │ ├── KLUSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── KLUSupport.h │ │ │ │ │ ├── LU/ │ │ │ │ │ │ ├── Determinant.h │ │ │ │ │ │ ├── FullPivLU.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── InverseImpl.h │ │ │ │ │ │ ├── PartialPivLU.h │ │ │ │ │ │ ├── PartialPivLU_LAPACKE.h │ │ │ │ │ │ └── arch/ │ │ │ │ │ │ └── InverseSize4.h │ │ │ │ │ ├── MetisSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── MetisSupport.h │ │ │ │ │ ├── OrderingMethods/ │ │ │ │ │ │ ├── Amd.h │ │ │ │ │ │ ├── Eigen_Colamd.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── Ordering.h │ │ │ │ │ ├── PaStiXSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── PaStiXSupport.h │ │ │ │ │ ├── PardisoSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── PardisoSupport.h │ │ │ │ │ ├── QR/ │ │ │ │ │ │ ├── ColPivHouseholderQR.h │ │ │ │ │ │ ├── ColPivHouseholderQR_LAPACKE.h │ │ │ │ │ │ ├── CompleteOrthogonalDecomposition.h │ │ │ │ │ │ ├── FullPivHouseholderQR.h │ │ │ │ │ │ ├── HouseholderQR.h │ │ │ │ │ │ ├── HouseholderQR_LAPACKE.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── SPQRSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── SuiteSparseQRSupport.h │ │ │ │ │ ├── SVD/ │ │ │ │ │ │ ├── BDCSVD.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── JacobiSVD.h │ │ │ │ │ │ ├── JacobiSVD_LAPACKE.h │ │ │ │ │ │ ├── SVDBase.h │ │ │ │ │ │ └── UpperBidiagonalization.h │ │ │ │ │ ├── SparseCholesky/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── SimplicialCholesky.h │ │ │ │ │ │ └── SimplicialCholesky_impl.h │ │ │ │ │ ├── SparseCore/ │ │ │ │ │ │ ├── AmbiVector.h │ │ │ │ │ │ ├── CompressedStorage.h │ │ │ │ │ │ ├── ConservativeSparseSparseProduct.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── MappedSparseMatrix.h │ │ │ │ │ │ ├── SparseAssign.h │ │ │ │ │ │ ├── SparseBlock.h │ 
│ │ │ │ │ ├── SparseColEtree.h │ │ │ │ │ │ ├── SparseCompressedBase.h │ │ │ │ │ │ ├── SparseCwiseBinaryOp.h │ │ │ │ │ │ ├── SparseCwiseUnaryOp.h │ │ │ │ │ │ ├── SparseDenseProduct.h │ │ │ │ │ │ ├── SparseDiagonalProduct.h │ │ │ │ │ │ ├── SparseDot.h │ │ │ │ │ │ ├── SparseFuzzy.h │ │ │ │ │ │ ├── SparseMap.h │ │ │ │ │ │ ├── SparseMatrix.h │ │ │ │ │ │ ├── SparseMatrixBase.h │ │ │ │ │ │ ├── SparsePermutation.h │ │ │ │ │ │ ├── SparseProduct.h │ │ │ │ │ │ ├── SparseRedux.h │ │ │ │ │ │ ├── SparseRef.h │ │ │ │ │ │ ├── SparseSelfAdjointView.h │ │ │ │ │ │ ├── SparseSolverBase.h │ │ │ │ │ │ ├── SparseSparseProductWithPruning.h │ │ │ │ │ │ ├── SparseTranspose.h │ │ │ │ │ │ ├── SparseTriangularView.h │ │ │ │ │ │ ├── SparseUtil.h │ │ │ │ │ │ ├── SparseVector.h │ │ │ │ │ │ ├── SparseView.h │ │ │ │ │ │ └── TriangularSolver.h │ │ │ │ │ ├── SparseLU/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── SparseLU.h │ │ │ │ │ │ ├── SparseLUImpl.h │ │ │ │ │ │ ├── SparseLU_Memory.h │ │ │ │ │ │ ├── SparseLU_Structs.h │ │ │ │ │ │ ├── SparseLU_SupernodalMatrix.h │ │ │ │ │ │ ├── SparseLU_Utils.h │ │ │ │ │ │ ├── SparseLU_column_bmod.h │ │ │ │ │ │ ├── SparseLU_column_dfs.h │ │ │ │ │ │ ├── SparseLU_copy_to_ucol.h │ │ │ │ │ │ ├── SparseLU_gemm_kernel.h │ │ │ │ │ │ ├── SparseLU_heap_relax_snode.h │ │ │ │ │ │ ├── SparseLU_kernel_bmod.h │ │ │ │ │ │ ├── SparseLU_panel_bmod.h │ │ │ │ │ │ ├── SparseLU_panel_dfs.h │ │ │ │ │ │ ├── SparseLU_pivotL.h │ │ │ │ │ │ ├── SparseLU_pruneL.h │ │ │ │ │ │ └── SparseLU_relax_snode.h │ │ │ │ │ ├── SparseQR/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── SparseQR.h │ │ │ │ │ ├── StlSupport/ │ │ │ │ │ │ ├── StdDeque.h │ │ │ │ │ │ ├── StdList.h │ │ │ │ │ │ ├── StdVector.h │ │ │ │ │ │ └── details.h │ │ │ │ │ ├── SuperLUSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── SuperLUSupport.h │ │ │ │ │ ├── UmfPackSupport/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── UmfPackSupport.h │ │ │ │ │ ├── misc/ │ │ │ │ │ │ ├── Image.h │ │ │ │ │ │ ├── 
InternalHeaderCheck.h │ │ │ │ │ │ ├── Kernel.h │ │ │ │ │ │ ├── RealSvd2x2.h │ │ │ │ │ │ ├── blas.h │ │ │ │ │ │ ├── lapack.h │ │ │ │ │ │ ├── lapacke.h │ │ │ │ │ │ └── lapacke_mangling.h │ │ │ │ │ └── plugins/ │ │ │ │ │ ├── ArrayCwiseBinaryOps.h │ │ │ │ │ ├── ArrayCwiseUnaryOps.h │ │ │ │ │ ├── BlockMethods.h │ │ │ │ │ ├── CommonCwiseBinaryOps.h │ │ │ │ │ ├── CommonCwiseUnaryOps.h │ │ │ │ │ ├── IndexedViewMethods.h │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ ├── MatrixCwiseBinaryOps.h │ │ │ │ │ ├── MatrixCwiseUnaryOps.h │ │ │ │ │ └── ReshapedMethods.h │ │ │ │ ├── INSTALL │ │ │ │ ├── README.md │ │ │ │ ├── bench/ │ │ │ │ │ ├── BenchSparseUtil.h │ │ │ │ │ ├── BenchTimer.h │ │ │ │ │ ├── BenchUtil.h │ │ │ │ │ ├── analyze-blocking-sizes.cpp │ │ │ │ │ ├── basicbench.cxxlist │ │ │ │ │ ├── basicbenchmark.cpp │ │ │ │ │ ├── basicbenchmark.h │ │ │ │ │ ├── benchBlasGemm.cpp │ │ │ │ │ ├── benchCholesky.cpp │ │ │ │ │ ├── benchEigenSolver.cpp │ │ │ │ │ ├── benchFFT.cpp │ │ │ │ │ ├── benchGeometry.cpp │ │ │ │ │ ├── benchVecAdd.cpp │ │ │ │ │ ├── bench_gemm.cpp │ │ │ │ │ ├── bench_move_semantics.cpp │ │ │ │ │ ├── bench_multi_compilers.sh │ │ │ │ │ ├── bench_norm.cpp │ │ │ │ │ ├── bench_reverse.cpp │ │ │ │ │ ├── bench_sum.cpp │ │ │ │ │ ├── bench_unrolling │ │ │ │ │ ├── benchmark-blocking-sizes.cpp │ │ │ │ │ ├── benchmark.cpp │ │ │ │ │ ├── benchmarkSlice.cpp │ │ │ │ │ ├── benchmarkX.cpp │ │ │ │ │ ├── benchmarkXcwise.cpp │ │ │ │ │ ├── benchmark_suite │ │ │ │ │ ├── btl/ │ │ │ │ │ │ ├── COPYING │ │ │ │ │ │ ├── README │ │ │ │ │ │ ├── actions/ │ │ │ │ │ │ │ ├── action_aat_product.hh │ │ │ │ │ │ │ ├── action_ata_product.hh │ │ │ │ │ │ │ ├── action_atv_product.hh │ │ │ │ │ │ │ ├── action_axpby.hh │ │ │ │ │ │ │ ├── action_axpy.hh │ │ │ │ │ │ │ ├── action_cholesky.hh │ │ │ │ │ │ │ ├── action_ger.hh │ │ │ │ │ │ │ ├── action_hessenberg.hh │ │ │ │ │ │ │ ├── action_lu_decomp.hh │ │ │ │ │ │ │ ├── action_lu_solve.hh │ │ │ │ │ │ │ ├── action_matrix_matrix_product.hh │ │ │ │ │ │ │ ├── 
action_matrix_matrix_product_bis.hh │ │ │ │ │ │ │ ├── action_matrix_vector_product.hh │ │ │ │ │ │ │ ├── action_partial_lu.hh │ │ │ │ │ │ │ ├── action_rot.hh │ │ │ │ │ │ │ ├── action_symv.hh │ │ │ │ │ │ │ ├── action_syr2.hh │ │ │ │ │ │ │ ├── action_trisolve.hh │ │ │ │ │ │ │ ├── action_trisolve_matrix.hh │ │ │ │ │ │ │ ├── action_trmm.hh │ │ │ │ │ │ │ └── basic_actions.hh │ │ │ │ │ │ ├── cmake/ │ │ │ │ │ │ │ ├── FindACML.cmake │ │ │ │ │ │ │ ├── FindATLAS.cmake │ │ │ │ │ │ │ ├── FindBLAZE.cmake │ │ │ │ │ │ │ ├── FindBlitz.cmake │ │ │ │ │ │ │ ├── FindCBLAS.cmake │ │ │ │ │ │ │ ├── FindGMM.cmake │ │ │ │ │ │ │ ├── FindMKL.cmake │ │ │ │ │ │ │ ├── FindMTL4.cmake │ │ │ │ │ │ │ ├── FindOPENBLAS.cmake │ │ │ │ │ │ │ ├── FindPackageHandleStandardArgs.cmake │ │ │ │ │ │ │ ├── FindTvmet.cmake │ │ │ │ │ │ │ └── MacroOptionalAddSubdirectory.cmake │ │ │ │ │ │ ├── generic_bench/ │ │ │ │ │ │ │ ├── bench.hh │ │ │ │ │ │ │ ├── bench_parameter.hh │ │ │ │ │ │ │ ├── btl.hh │ │ │ │ │ │ │ ├── init/ │ │ │ │ │ │ │ │ ├── init_function.hh │ │ │ │ │ │ │ │ ├── init_matrix.hh │ │ │ │ │ │ │ │ └── init_vector.hh │ │ │ │ │ │ │ ├── static/ │ │ │ │ │ │ │ │ ├── bench_static.hh │ │ │ │ │ │ │ │ ├── intel_bench_fixed_size.hh │ │ │ │ │ │ │ │ └── static_size_generator.hh │ │ │ │ │ │ │ ├── timers/ │ │ │ │ │ │ │ │ ├── STL_perf_analyzer.hh │ │ │ │ │ │ │ │ ├── STL_timer.hh │ │ │ │ │ │ │ │ ├── mixed_perf_analyzer.hh │ │ │ │ │ │ │ │ ├── portable_perf_analyzer.hh │ │ │ │ │ │ │ │ ├── portable_perf_analyzer_old.hh │ │ │ │ │ │ │ │ ├── portable_timer.hh │ │ │ │ │ │ │ │ ├── x86_perf_analyzer.hh │ │ │ │ │ │ │ │ └── x86_timer.hh │ │ │ │ │ │ │ └── utils/ │ │ │ │ │ │ │ ├── size_lin_log.hh │ │ │ │ │ │ │ ├── size_log.hh │ │ │ │ │ │ │ ├── utilities.h │ │ │ │ │ │ │ └── xy_file.hh │ │ │ │ │ │ └── libs/ │ │ │ │ │ │ ├── BLAS/ │ │ │ │ │ │ │ ├── blas.h │ │ │ │ │ │ │ ├── blas_interface.hh │ │ │ │ │ │ │ ├── blas_interface_impl.hh │ │ │ │ │ │ │ ├── c_interface_base.h │ │ │ │ │ │ │ └── main.cpp │ │ │ │ │ │ ├── STL/ │ │ │ │ │ │ │ ├── 
STL_interface.hh │ │ │ │ │ │ │ └── main.cpp │ │ │ │ │ │ ├── blaze/ │ │ │ │ │ │ │ ├── blaze_interface.hh │ │ │ │ │ │ │ └── main.cpp │ │ │ │ │ │ ├── blitz/ │ │ │ │ │ │ │ ├── blitz_LU_solve_interface.hh │ │ │ │ │ │ │ ├── blitz_interface.hh │ │ │ │ │ │ │ ├── btl_blitz.cpp │ │ │ │ │ │ │ ├── btl_tiny_blitz.cpp │ │ │ │ │ │ │ └── tiny_blitz_interface.hh │ │ │ │ │ │ ├── eigen2/ │ │ │ │ │ │ │ ├── btl_tiny_eigen2.cpp │ │ │ │ │ │ │ ├── eigen2_interface.hh │ │ │ │ │ │ │ ├── main_adv.cpp │ │ │ │ │ │ │ ├── main_linear.cpp │ │ │ │ │ │ │ ├── main_matmat.cpp │ │ │ │ │ │ │ └── main_vecmat.cpp │ │ │ │ │ │ ├── eigen3/ │ │ │ │ │ │ │ ├── btl_tiny_eigen3.cpp │ │ │ │ │ │ │ ├── eigen3_interface.hh │ │ │ │ │ │ │ ├── main_adv.cpp │ │ │ │ │ │ │ ├── main_linear.cpp │ │ │ │ │ │ │ ├── main_matmat.cpp │ │ │ │ │ │ │ └── main_vecmat.cpp │ │ │ │ │ │ ├── gmm/ │ │ │ │ │ │ │ ├── gmm_LU_solve_interface.hh │ │ │ │ │ │ │ ├── gmm_interface.hh │ │ │ │ │ │ │ └── main.cpp │ │ │ │ │ │ ├── mtl4/ │ │ │ │ │ │ │ ├── .kdbgrc.main │ │ │ │ │ │ │ ├── main.cpp │ │ │ │ │ │ │ ├── mtl4_LU_solve_interface.hh │ │ │ │ │ │ │ └── mtl4_interface.hh │ │ │ │ │ │ ├── tensors/ │ │ │ │ │ │ │ ├── main_linear.cpp │ │ │ │ │ │ │ ├── main_matmat.cpp │ │ │ │ │ │ │ ├── main_vecmat.cpp │ │ │ │ │ │ │ └── tensor_interface.hh │ │ │ │ │ │ ├── tvmet/ │ │ │ │ │ │ │ ├── main.cpp │ │ │ │ │ │ │ └── tvmet_interface.hh │ │ │ │ │ │ └── ublas/ │ │ │ │ │ │ ├── main.cpp │ │ │ │ │ │ └── ublas_interface.hh │ │ │ │ │ ├── check_cache_queries.cpp │ │ │ │ │ ├── dense_solvers.cpp │ │ │ │ │ ├── eig33.cpp │ │ │ │ │ ├── geometry.cpp │ │ │ │ │ ├── perf_monitoring/ │ │ │ │ │ │ ├── gemm.cpp │ │ │ │ │ │ ├── gemm_common.h │ │ │ │ │ │ ├── gemv.cpp │ │ │ │ │ │ ├── gemv_common.h │ │ │ │ │ │ ├── gemvt.cpp │ │ │ │ │ │ ├── lazy_gemm.cpp │ │ │ │ │ │ ├── llt.cpp │ │ │ │ │ │ ├── make_plot.sh │ │ │ │ │ │ ├── resources/ │ │ │ │ │ │ │ ├── chart_footer.html │ │ │ │ │ │ │ ├── chart_header.html │ │ │ │ │ │ │ ├── footer.html │ │ │ │ │ │ │ ├── header.html │ │ │ │ │ │ │ ├── s1.js │ │ │ │ 
│ │ │ └── s2.js │ │ │ │ │ │ ├── run.sh │ │ │ │ │ │ ├── runall.sh │ │ │ │ │ │ ├── trmv_lo.cpp │ │ │ │ │ │ ├── trmv_lot.cpp │ │ │ │ │ │ ├── trmv_up.cpp │ │ │ │ │ │ └── trmv_upt.cpp │ │ │ │ │ ├── product_threshold.cpp │ │ │ │ │ ├── quat_slerp.cpp │ │ │ │ │ ├── quatmul.cpp │ │ │ │ │ ├── sparse_cholesky.cpp │ │ │ │ │ ├── sparse_dense_product.cpp │ │ │ │ │ ├── sparse_lu.cpp │ │ │ │ │ ├── sparse_product.cpp │ │ │ │ │ ├── sparse_randomsetter.cpp │ │ │ │ │ ├── sparse_setter.cpp │ │ │ │ │ ├── sparse_transpose.cpp │ │ │ │ │ ├── sparse_trisolver.cpp │ │ │ │ │ ├── spbench/ │ │ │ │ │ │ ├── sp_solver.cpp │ │ │ │ │ │ ├── spbench.dtd │ │ │ │ │ │ ├── spbenchsolver.cpp │ │ │ │ │ │ ├── spbenchsolver.h │ │ │ │ │ │ ├── spbenchstyle.h │ │ │ │ │ │ └── test_sparseLU.cpp │ │ │ │ │ ├── spmv.cpp │ │ │ │ │ ├── tensors/ │ │ │ │ │ │ ├── README │ │ │ │ │ │ ├── benchmark.h │ │ │ │ │ │ ├── benchmark_main.cc │ │ │ │ │ │ ├── contraction_benchmarks_cpu.cc │ │ │ │ │ │ ├── eigen_sycl_bench.sh │ │ │ │ │ │ ├── eigen_sycl_bench_contract.sh │ │ │ │ │ │ ├── tensor_benchmarks.h │ │ │ │ │ │ ├── tensor_benchmarks_cpu.cc │ │ │ │ │ │ ├── tensor_benchmarks_fp16_gpu.cu │ │ │ │ │ │ ├── tensor_benchmarks_gpu.cu │ │ │ │ │ │ ├── tensor_benchmarks_sycl.cc │ │ │ │ │ │ └── tensor_contract_sycl_bench.cc │ │ │ │ │ └── vdw_new.cpp │ │ │ │ ├── blas/ │ │ │ │ │ ├── BandTriangularSolver.h │ │ │ │ │ ├── GeneralRank1Update.h │ │ │ │ │ ├── PackedSelfadjointProduct.h │ │ │ │ │ ├── PackedTriangularMatrixVector.h │ │ │ │ │ ├── PackedTriangularSolverVector.h │ │ │ │ │ ├── Rank2Update.h │ │ │ │ │ ├── common.h │ │ │ │ │ ├── complex_double.cpp │ │ │ │ │ ├── complex_single.cpp │ │ │ │ │ ├── double.cpp │ │ │ │ │ ├── f2c/ │ │ │ │ │ │ ├── chbmv.c │ │ │ │ │ │ ├── chpmv.c │ │ │ │ │ │ ├── complexdots.c │ │ │ │ │ │ ├── ctbmv.c │ │ │ │ │ │ ├── d_cnjg.c │ │ │ │ │ │ ├── datatypes.h │ │ │ │ │ │ ├── drotm.c │ │ │ │ │ │ ├── drotmg.c │ │ │ │ │ │ ├── dsbmv.c │ │ │ │ │ │ ├── dspmv.c │ │ │ │ │ │ ├── dtbmv.c │ │ │ │ │ │ ├── lsame.c │ │ │ │ │ │ ├── r_cnjg.c 
│ │ │ │ │ │ ├── srotm.c │ │ │ │ │ │ ├── srotmg.c │ │ │ │ │ │ ├── ssbmv.c │ │ │ │ │ │ ├── sspmv.c │ │ │ │ │ │ ├── stbmv.c │ │ │ │ │ │ ├── zhbmv.c │ │ │ │ │ │ ├── zhpmv.c │ │ │ │ │ │ └── ztbmv.c │ │ │ │ │ ├── fortran/ │ │ │ │ │ │ └── complexdots.f │ │ │ │ │ ├── level1_cplx_impl.h │ │ │ │ │ ├── level1_impl.h │ │ │ │ │ ├── level1_real_impl.h │ │ │ │ │ ├── level2_cplx_impl.h │ │ │ │ │ ├── level2_impl.h │ │ │ │ │ ├── level2_real_impl.h │ │ │ │ │ ├── level3_impl.h │ │ │ │ │ ├── single.cpp │ │ │ │ │ ├── testing/ │ │ │ │ │ │ ├── cblat1.f │ │ │ │ │ │ ├── cblat2.f │ │ │ │ │ │ ├── cblat3.f │ │ │ │ │ │ ├── dblat1.f │ │ │ │ │ │ ├── dblat2.f │ │ │ │ │ │ ├── dblat3.f │ │ │ │ │ │ ├── runblastest.sh │ │ │ │ │ │ ├── sblat1.f │ │ │ │ │ │ ├── sblat2.f │ │ │ │ │ │ ├── sblat3.f │ │ │ │ │ │ ├── zblat1.f │ │ │ │ │ │ ├── zblat2.f │ │ │ │ │ │ └── zblat3.f │ │ │ │ │ └── xerbla.cpp │ │ │ │ ├── ci/ │ │ │ │ │ ├── CTest2JUnit.xsl │ │ │ │ │ ├── README.md │ │ │ │ │ ├── smoketests.gitlab-ci.yml │ │ │ │ │ └── test.gitlab-ci.yml │ │ │ │ ├── cmake/ │ │ │ │ │ ├── ComputeCppCompilerChecks.cmake │ │ │ │ │ ├── ComputeCppIRMap.cmake │ │ │ │ │ ├── Eigen3Config.cmake.in │ │ │ │ │ ├── EigenConfigureTesting.cmake │ │ │ │ │ ├── EigenSmokeTestList.cmake │ │ │ │ │ ├── EigenTesting.cmake │ │ │ │ │ ├── EigenUninstall.cmake │ │ │ │ │ ├── FindAdolc.cmake │ │ │ │ │ ├── FindBLAS.cmake │ │ │ │ │ ├── FindBLASEXT.cmake │ │ │ │ │ ├── FindCHOLMOD.cmake │ │ │ │ │ ├── FindComputeCpp.cmake │ │ │ │ │ ├── FindFFTW.cmake │ │ │ │ │ ├── FindGLEW.cmake │ │ │ │ │ ├── FindGMP.cmake │ │ │ │ │ ├── FindGSL.cmake │ │ │ │ │ ├── FindGoogleHash.cmake │ │ │ │ │ ├── FindHWLOC.cmake │ │ │ │ │ ├── FindKLU.cmake │ │ │ │ │ ├── FindLAPACK.cmake │ │ │ │ │ ├── FindMPFR.cmake │ │ │ │ │ ├── FindMPREAL.cmake │ │ │ │ │ ├── FindMetis.cmake │ │ │ │ │ ├── FindPASTIX.cmake │ │ │ │ │ ├── FindPTSCOTCH.cmake │ │ │ │ │ ├── FindSCOTCH.cmake │ │ │ │ │ ├── FindSPQR.cmake │ │ │ │ │ ├── FindStandardMathLibrary.cmake │ │ │ │ │ ├── FindSuperLU.cmake │ │ │ │ │ ├── 
FindTriSYCL.cmake │ │ │ │ │ ├── FindUMFPACK.cmake │ │ │ │ │ └── RegexUtils.cmake │ │ │ │ ├── debug/ │ │ │ │ │ ├── gdb/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── printers.py │ │ │ │ │ ├── lldb/ │ │ │ │ │ │ └── eigenlldb.py │ │ │ │ │ └── msvc/ │ │ │ │ │ └── eigen.natvis │ │ │ │ ├── demos/ │ │ │ │ │ ├── mandelbrot/ │ │ │ │ │ │ ├── README │ │ │ │ │ │ ├── mandelbrot.cpp │ │ │ │ │ │ └── mandelbrot.h │ │ │ │ │ ├── mix_eigen_and_c/ │ │ │ │ │ │ ├── README │ │ │ │ │ │ ├── binary_library.cpp │ │ │ │ │ │ ├── binary_library.h │ │ │ │ │ │ └── example.c │ │ │ │ │ └── opengl/ │ │ │ │ │ ├── README │ │ │ │ │ ├── camera.cpp │ │ │ │ │ ├── camera.h │ │ │ │ │ ├── gpuhelper.cpp │ │ │ │ │ ├── gpuhelper.h │ │ │ │ │ ├── icosphere.cpp │ │ │ │ │ ├── icosphere.h │ │ │ │ │ ├── quaternion_demo.cpp │ │ │ │ │ ├── quaternion_demo.h │ │ │ │ │ ├── trackball.cpp │ │ │ │ │ └── trackball.h │ │ │ │ ├── doc/ │ │ │ │ │ ├── B01_Experimental.dox │ │ │ │ │ ├── ClassHierarchy.dox │ │ │ │ │ ├── CoeffwiseMathFunctionsTable.dox │ │ │ │ │ ├── CustomizingEigen_CustomScalar.dox │ │ │ │ │ ├── CustomizingEigen_InheritingMatrix.dox │ │ │ │ │ ├── CustomizingEigen_NullaryExpr.dox │ │ │ │ │ ├── CustomizingEigen_Plugins.dox │ │ │ │ │ ├── DenseDecompositionBenchmark.dox │ │ │ │ │ ├── Doxyfile.in │ │ │ │ │ ├── FixedSizeVectorizable.dox │ │ │ │ │ ├── FunctionsTakingEigenTypes.dox │ │ │ │ │ ├── HiPerformance.dox │ │ │ │ │ ├── InplaceDecomposition.dox │ │ │ │ │ ├── InsideEigenExample.dox │ │ │ │ │ ├── LeastSquares.dox │ │ │ │ │ ├── Manual.dox │ │ │ │ │ ├── MatrixfreeSolverExample.dox │ │ │ │ │ ├── NewExpressionType.dox │ │ │ │ │ ├── Overview.dox │ │ │ │ │ ├── PassingByValue.dox │ │ │ │ │ ├── Pitfalls.dox │ │ │ │ │ ├── PreprocessorDirectives.dox │ │ │ │ │ ├── QuickReference.dox │ │ │ │ │ ├── QuickStartGuide.dox │ │ │ │ │ ├── SparseLinearSystems.dox │ │ │ │ │ ├── SparseQuickReference.dox │ │ │ │ │ ├── StlContainers.dox │ │ │ │ │ ├── StorageOrders.dox │ │ │ │ │ ├── StructHavingEigenMembers.dox │ │ │ │ │ ├── TemplateKeyword.dox │ 
│ │ │ │ ├── TopicAliasing.dox │ │ │ │ │ ├── TopicAssertions.dox │ │ │ │ │ ├── TopicCMakeGuide.dox │ │ │ │ │ ├── TopicEigenExpressionTemplates.dox │ │ │ │ │ ├── TopicLazyEvaluation.dox │ │ │ │ │ ├── TopicLinearAlgebraDecompositions.dox │ │ │ │ │ ├── TopicMultithreading.dox │ │ │ │ │ ├── TopicResizing.dox │ │ │ │ │ ├── TopicScalarTypes.dox │ │ │ │ │ ├── TopicVectorization.dox │ │ │ │ │ ├── TutorialAdvancedInitialization.dox │ │ │ │ │ ├── TutorialArrayClass.dox │ │ │ │ │ ├── TutorialBlockOperations.dox │ │ │ │ │ ├── TutorialGeometry.dox │ │ │ │ │ ├── TutorialLinearAlgebra.dox │ │ │ │ │ ├── TutorialMapClass.dox │ │ │ │ │ ├── TutorialMatrixArithmetic.dox │ │ │ │ │ ├── TutorialMatrixClass.dox │ │ │ │ │ ├── TutorialReductionsVisitorsBroadcasting.dox │ │ │ │ │ ├── TutorialReshape.dox │ │ │ │ │ ├── TutorialSTL.dox │ │ │ │ │ ├── TutorialSlicingIndexing.dox │ │ │ │ │ ├── TutorialSparse.dox │ │ │ │ │ ├── TutorialSparse_example_details.dox │ │ │ │ │ ├── UnalignedArrayAssert.dox │ │ │ │ │ ├── UsingBlasLapackBackends.dox │ │ │ │ │ ├── UsingIntelMKL.dox │ │ │ │ │ ├── UsingNVCC.dox │ │ │ │ │ ├── WrongStackAlignment.dox │ │ │ │ │ ├── eigen_navtree_hacks.js │ │ │ │ │ ├── eigendoxy.css │ │ │ │ │ ├── eigendoxy_footer.html.in │ │ │ │ │ ├── eigendoxy_header.html.in │ │ │ │ │ ├── eigendoxy_layout.xml.in │ │ │ │ │ ├── eigendoxy_tabs.css │ │ │ │ │ ├── examples/ │ │ │ │ │ │ ├── .krazy │ │ │ │ │ │ ├── CustomizingEigen_Inheritance.cpp │ │ │ │ │ │ ├── Cwise_erf.cpp │ │ │ │ │ │ ├── Cwise_erfc.cpp │ │ │ │ │ │ ├── Cwise_lgamma.cpp │ │ │ │ │ │ ├── DenseBase_middleCols_int.cpp │ │ │ │ │ │ ├── DenseBase_middleRows_int.cpp │ │ │ │ │ │ ├── DenseBase_template_int_middleCols.cpp │ │ │ │ │ │ ├── DenseBase_template_int_middleRows.cpp │ │ │ │ │ │ ├── QuickStart_example.cpp │ │ │ │ │ │ ├── QuickStart_example2_dynamic.cpp │ │ │ │ │ │ ├── QuickStart_example2_fixed.cpp │ │ │ │ │ │ ├── TemplateKeyword_flexible.cpp │ │ │ │ │ │ ├── TemplateKeyword_simple.cpp │ │ │ │ │ │ ├── TutorialInplaceLU.cpp │ │ │ │ │ │ ├── 
TutorialLinAlgComputeTwice.cpp │ │ │ │ │ │ ├── TutorialLinAlgExComputeSolveError.cpp │ │ │ │ │ │ ├── TutorialLinAlgExSolveColPivHouseholderQR.cpp │ │ │ │ │ │ ├── TutorialLinAlgExSolveLDLT.cpp │ │ │ │ │ │ ├── TutorialLinAlgInverseDeterminant.cpp │ │ │ │ │ │ ├── TutorialLinAlgRankRevealing.cpp │ │ │ │ │ │ ├── TutorialLinAlgSVDSolve.cpp │ │ │ │ │ │ ├── TutorialLinAlgSelfAdjointEigenSolver.cpp │ │ │ │ │ │ ├── TutorialLinAlgSetThreshold.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_accessors.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_addition.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_cwise_other.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_interop.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_interop_matrix.cpp │ │ │ │ │ │ ├── Tutorial_ArrayClass_mult.cpp │ │ │ │ │ │ ├── Tutorial_BlockOperations_block_assignment.cpp │ │ │ │ │ │ ├── Tutorial_BlockOperations_colrow.cpp │ │ │ │ │ │ ├── Tutorial_BlockOperations_corner.cpp │ │ │ │ │ │ ├── Tutorial_BlockOperations_print_block.cpp │ │ │ │ │ │ ├── Tutorial_BlockOperations_vector.cpp │ │ │ │ │ │ ├── Tutorial_PartialLU_solve.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp │ │ │ │ │ │ ├── Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp │ │ │ │ │ │ ├── Tutorial_simple_example_dynamic_size.cpp │ │ │ │ │ │ ├── Tutorial_simple_example_fixed_size.cpp │ │ │ │ │ │ ├── class_Block.cpp │ │ │ │ │ │ ├── class_CwiseBinaryOp.cpp │ │ │ │ │ │ 
├── class_CwiseUnaryOp.cpp │ │ │ │ │ │ ├── class_CwiseUnaryOp_ptrfun.cpp │ │ │ │ │ │ ├── class_FixedBlock.cpp │ │ │ │ │ │ ├── class_FixedReshaped.cpp │ │ │ │ │ │ ├── class_FixedVectorBlock.cpp │ │ │ │ │ │ ├── class_Reshaped.cpp │ │ │ │ │ │ ├── class_VectorBlock.cpp │ │ │ │ │ │ ├── function_taking_eigenbase.cpp │ │ │ │ │ │ ├── function_taking_ref.cpp │ │ │ │ │ │ ├── make_circulant.cpp │ │ │ │ │ │ ├── make_circulant.cpp.entry │ │ │ │ │ │ ├── make_circulant.cpp.evaluator │ │ │ │ │ │ ├── make_circulant.cpp.expression │ │ │ │ │ │ ├── make_circulant.cpp.main │ │ │ │ │ │ ├── make_circulant.cpp.preamble │ │ │ │ │ │ ├── make_circulant.cpp.traits │ │ │ │ │ │ ├── make_circulant2.cpp │ │ │ │ │ │ ├── matrixfree_cg.cpp │ │ │ │ │ │ ├── nullary_indexing.cpp │ │ │ │ │ │ ├── tut_arithmetic_add_sub.cpp │ │ │ │ │ │ ├── tut_arithmetic_dot_cross.cpp │ │ │ │ │ │ ├── tut_arithmetic_matrix_mul.cpp │ │ │ │ │ │ ├── tut_arithmetic_redux_basic.cpp │ │ │ │ │ │ ├── tut_arithmetic_scalar_mul_div.cpp │ │ │ │ │ │ ├── tut_matrix_coefficient_accessors.cpp │ │ │ │ │ │ ├── tut_matrix_resize.cpp │ │ │ │ │ │ └── tut_matrix_resize_fixed_size.cpp │ │ │ │ │ ├── snippets/ │ │ │ │ │ │ ├── .krazy │ │ │ │ │ │ ├── AngleAxis_mimic_euler.cpp │ │ │ │ │ │ ├── Array_initializer_list_23_cxx11.cpp │ │ │ │ │ │ ├── Array_initializer_list_vector_cxx11.cpp │ │ │ │ │ │ ├── Array_variadic_ctor_cxx11.cpp │ │ │ │ │ │ ├── BiCGSTAB_simple.cpp │ │ │ │ │ │ ├── BiCGSTAB_step_by_step.cpp │ │ │ │ │ │ ├── ColPivHouseholderQR_solve.cpp │ │ │ │ │ │ ├── ComplexEigenSolver_compute.cpp │ │ │ │ │ │ ├── ComplexEigenSolver_eigenvalues.cpp │ │ │ │ │ │ ├── ComplexEigenSolver_eigenvectors.cpp │ │ │ │ │ │ ├── ComplexSchur_compute.cpp │ │ │ │ │ │ ├── ComplexSchur_matrixT.cpp │ │ │ │ │ │ ├── ComplexSchur_matrixU.cpp │ │ │ │ │ │ ├── Cwise_abs.cpp │ │ │ │ │ │ ├── Cwise_abs2.cpp │ │ │ │ │ │ ├── Cwise_acos.cpp │ │ │ │ │ │ ├── Cwise_arg.cpp │ │ │ │ │ │ ├── Cwise_array_power_array.cpp │ │ │ │ │ │ ├── Cwise_asin.cpp │ │ │ │ │ │ ├── Cwise_atan.cpp │ │ │ │ 
│ │ ├── Cwise_boolean_and.cpp │ │ │ │ │ │ ├── Cwise_boolean_not.cpp │ │ │ │ │ │ ├── Cwise_boolean_or.cpp │ │ │ │ │ │ ├── Cwise_boolean_xor.cpp │ │ │ │ │ │ ├── Cwise_ceil.cpp │ │ │ │ │ │ ├── Cwise_cos.cpp │ │ │ │ │ │ ├── Cwise_cosh.cpp │ │ │ │ │ │ ├── Cwise_cube.cpp │ │ │ │ │ │ ├── Cwise_equal_equal.cpp │ │ │ │ │ │ ├── Cwise_exp.cpp │ │ │ │ │ │ ├── Cwise_floor.cpp │ │ │ │ │ │ ├── Cwise_greater.cpp │ │ │ │ │ │ ├── Cwise_greater_equal.cpp │ │ │ │ │ │ ├── Cwise_inverse.cpp │ │ │ │ │ │ ├── Cwise_isFinite.cpp │ │ │ │ │ │ ├── Cwise_isInf.cpp │ │ │ │ │ │ ├── Cwise_isNaN.cpp │ │ │ │ │ │ ├── Cwise_less.cpp │ │ │ │ │ │ ├── Cwise_less_equal.cpp │ │ │ │ │ │ ├── Cwise_log.cpp │ │ │ │ │ │ ├── Cwise_log10.cpp │ │ │ │ │ │ ├── Cwise_max.cpp │ │ │ │ │ │ ├── Cwise_min.cpp │ │ │ │ │ │ ├── Cwise_minus.cpp │ │ │ │ │ │ ├── Cwise_minus_equal.cpp │ │ │ │ │ │ ├── Cwise_not_equal.cpp │ │ │ │ │ │ ├── Cwise_plus.cpp │ │ │ │ │ │ ├── Cwise_plus_equal.cpp │ │ │ │ │ │ ├── Cwise_pow.cpp │ │ │ │ │ │ ├── Cwise_product.cpp │ │ │ │ │ │ ├── Cwise_quotient.cpp │ │ │ │ │ │ ├── Cwise_rint.cpp │ │ │ │ │ │ ├── Cwise_round.cpp │ │ │ │ │ │ ├── Cwise_scalar_power_array.cpp │ │ │ │ │ │ ├── Cwise_sign.cpp │ │ │ │ │ │ ├── Cwise_sin.cpp │ │ │ │ │ │ ├── Cwise_sinh.cpp │ │ │ │ │ │ ├── Cwise_slash_equal.cpp │ │ │ │ │ │ ├── Cwise_sqrt.cpp │ │ │ │ │ │ ├── Cwise_square.cpp │ │ │ │ │ │ ├── Cwise_tan.cpp │ │ │ │ │ │ ├── Cwise_tanh.cpp │ │ │ │ │ │ ├── Cwise_times_equal.cpp │ │ │ │ │ │ ├── DenseBase_LinSpaced.cpp │ │ │ │ │ │ ├── DenseBase_LinSpacedInt.cpp │ │ │ │ │ │ ├── DenseBase_LinSpaced_seq_deprecated.cpp │ │ │ │ │ │ ├── DenseBase_setLinSpaced.cpp │ │ │ │ │ │ ├── DirectionWise_hnormalized.cpp │ │ │ │ │ │ ├── DirectionWise_replicate.cpp │ │ │ │ │ │ ├── DirectionWise_replicate_int.cpp │ │ │ │ │ │ ├── EigenSolver_EigenSolver_MatrixType.cpp │ │ │ │ │ │ ├── EigenSolver_compute.cpp │ │ │ │ │ │ ├── EigenSolver_eigenvalues.cpp │ │ │ │ │ │ ├── EigenSolver_eigenvectors.cpp │ │ │ │ │ │ ├── EigenSolver_pseudoEigenvectors.cpp │ │ │ │ 
│ │ ├── FullPivHouseholderQR_solve.cpp │ │ │ │ │ │ ├── FullPivLU_image.cpp │ │ │ │ │ │ ├── FullPivLU_kernel.cpp │ │ │ │ │ │ ├── FullPivLU_solve.cpp │ │ │ │ │ │ ├── GeneralizedEigenSolver.cpp │ │ │ │ │ │ ├── HessenbergDecomposition_compute.cpp │ │ │ │ │ │ ├── HessenbergDecomposition_matrixH.cpp │ │ │ │ │ │ ├── HessenbergDecomposition_packedMatrix.cpp │ │ │ │ │ │ ├── HouseholderQR_householderQ.cpp │ │ │ │ │ │ ├── HouseholderQR_solve.cpp │ │ │ │ │ │ ├── HouseholderSequence_HouseholderSequence.cpp │ │ │ │ │ │ ├── IOFormat.cpp │ │ │ │ │ │ ├── JacobiSVD_basic.cpp │ │ │ │ │ │ ├── Jacobi_makeGivens.cpp │ │ │ │ │ │ ├── Jacobi_makeJacobi.cpp │ │ │ │ │ │ ├── LLT_example.cpp │ │ │ │ │ │ ├── LLT_solve.cpp │ │ │ │ │ │ ├── LeastSquaresNormalEquations.cpp │ │ │ │ │ │ ├── LeastSquaresQR.cpp │ │ │ │ │ │ ├── Map_general_stride.cpp │ │ │ │ │ │ ├── Map_inner_stride.cpp │ │ │ │ │ │ ├── Map_outer_stride.cpp │ │ │ │ │ │ ├── Map_placement_new.cpp │ │ │ │ │ │ ├── Map_simple.cpp │ │ │ │ │ │ ├── MatrixBase_adjoint.cpp │ │ │ │ │ │ ├── MatrixBase_all.cpp │ │ │ │ │ │ ├── MatrixBase_applyOnTheLeft.cpp │ │ │ │ │ │ ├── MatrixBase_applyOnTheRight.cpp │ │ │ │ │ │ ├── MatrixBase_array.cpp │ │ │ │ │ │ ├── MatrixBase_array_const.cpp │ │ │ │ │ │ ├── MatrixBase_asDiagonal.cpp │ │ │ │ │ │ ├── MatrixBase_block_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_block_int_int_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_bottomLeftCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_bottomRightCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_bottomRows_int.cpp │ │ │ │ │ │ ├── MatrixBase_cast.cpp │ │ │ │ │ │ ├── MatrixBase_col.cpp │ │ │ │ │ │ ├── MatrixBase_colwise.cpp │ │ │ │ │ │ ├── MatrixBase_colwise_iterator_cxx11.cpp │ │ │ │ │ │ ├── MatrixBase_computeInverseAndDetWithCheck.cpp │ │ │ │ │ │ ├── MatrixBase_computeInverseWithCheck.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseAbs.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseAbs2.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseArg.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseEqual.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseInverse.cpp 
│ │ │ │ │ │ ├── MatrixBase_cwiseMax.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseMin.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseNotEqual.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseProduct.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseQuotient.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseSign.cpp │ │ │ │ │ │ ├── MatrixBase_cwiseSqrt.cpp │ │ │ │ │ │ ├── MatrixBase_diagonal.cpp │ │ │ │ │ │ ├── MatrixBase_diagonal_int.cpp │ │ │ │ │ │ ├── MatrixBase_diagonal_template_int.cpp │ │ │ │ │ │ ├── MatrixBase_eigenvalues.cpp │ │ │ │ │ │ ├── MatrixBase_end_int.cpp │ │ │ │ │ │ ├── MatrixBase_eval.cpp │ │ │ │ │ │ ├── MatrixBase_fixedBlock_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_hnormalized.cpp │ │ │ │ │ │ ├── MatrixBase_homogeneous.cpp │ │ │ │ │ │ ├── MatrixBase_identity.cpp │ │ │ │ │ │ ├── MatrixBase_identity_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_inverse.cpp │ │ │ │ │ │ ├── MatrixBase_isDiagonal.cpp │ │ │ │ │ │ ├── MatrixBase_isIdentity.cpp │ │ │ │ │ │ ├── MatrixBase_isOnes.cpp │ │ │ │ │ │ ├── MatrixBase_isOrthogonal.cpp │ │ │ │ │ │ ├── MatrixBase_isUnitary.cpp │ │ │ │ │ │ ├── MatrixBase_isZero.cpp │ │ │ │ │ │ ├── MatrixBase_leftCols_int.cpp │ │ │ │ │ │ ├── MatrixBase_noalias.cpp │ │ │ │ │ │ ├── MatrixBase_ones.cpp │ │ │ │ │ │ ├── MatrixBase_ones_int.cpp │ │ │ │ │ │ ├── MatrixBase_ones_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_operatorNorm.cpp │ │ │ │ │ │ ├── MatrixBase_prod.cpp │ │ │ │ │ │ ├── MatrixBase_random.cpp │ │ │ │ │ │ ├── MatrixBase_random_int.cpp │ │ │ │ │ │ ├── MatrixBase_random_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_replicate.cpp │ │ │ │ │ │ ├── MatrixBase_replicate_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_reshaped_auto.cpp │ │ │ │ │ │ ├── MatrixBase_reshaped_fixed.cpp │ │ │ │ │ │ ├── MatrixBase_reshaped_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_reshaped_to_vector.cpp │ │ │ │ │ │ ├── MatrixBase_reverse.cpp │ │ │ │ │ │ ├── MatrixBase_rightCols_int.cpp │ │ │ │ │ │ ├── MatrixBase_row.cpp │ │ │ │ │ │ ├── MatrixBase_rowwise.cpp │ │ │ │ │ │ ├── MatrixBase_segment_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_select.cpp │ │ │ │ │ 
│ ├── MatrixBase_selfadjointView.cpp │ │ │ │ │ │ ├── MatrixBase_set.cpp │ │ │ │ │ │ ├── MatrixBase_setIdentity.cpp │ │ │ │ │ │ ├── MatrixBase_setOnes.cpp │ │ │ │ │ │ ├── MatrixBase_setRandom.cpp │ │ │ │ │ │ ├── MatrixBase_setZero.cpp │ │ │ │ │ │ ├── MatrixBase_start_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_bottomRows.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_end.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_block_int_int_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_bottomLeftCorner.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_bottomRightCorner.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_bottomRightCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_topLeftCorner.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_topLeftCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_topRightCorner.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_int_topRightCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_leftCols.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_rightCols.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_segment.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_start.cpp │ │ │ │ │ │ ├── MatrixBase_template_int_topRows.cpp │ │ │ │ │ │ ├── MatrixBase_topLeftCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_topRightCorner_int_int.cpp │ │ │ │ │ │ ├── MatrixBase_topRows_int.cpp │ │ │ │ │ │ ├── MatrixBase_transpose.cpp │ │ │ │ │ │ ├── MatrixBase_triangularView.cpp │ │ │ │ │ │ ├── MatrixBase_zero.cpp │ │ │ │ │ │ ├── MatrixBase_zero_int.cpp │ │ │ │ │ │ ├── MatrixBase_zero_int_int.cpp │ │ │ │ │ │ ├── Matrix_Map_stride.cpp │ │ │ │ │ │ ├── Matrix_initializer_list_23_cxx11.cpp │ │ │ │ │ │ ├── Matrix_initializer_list_vector_cxx11.cpp │ │ │ │ │ │ ├── Matrix_resize_NoChange_int.cpp │ │ │ │ │ │ ├── Matrix_resize_int.cpp │ │ │ │ │ │ ├── Matrix_resize_int_NoChange.cpp │ │ │ │ │ │ ├── Matrix_resize_int_int.cpp │ │ │ │ │ │ ├── Matrix_setConstant_int.cpp │ │ │ │ │ │ ├── 
Matrix_setConstant_int_int.cpp │ │ │ │ │ │ ├── Matrix_setIdentity_int_int.cpp │ │ │ │ │ │ ├── Matrix_setOnes_int.cpp │ │ │ │ │ │ ├── Matrix_setOnes_int_int.cpp │ │ │ │ │ │ ├── Matrix_setRandom_int.cpp │ │ │ │ │ │ ├── Matrix_setRandom_int_int.cpp │ │ │ │ │ │ ├── Matrix_setZero_int.cpp │ │ │ │ │ │ ├── Matrix_setZero_int_int.cpp │ │ │ │ │ │ ├── Matrix_variadic_ctor_cxx11.cpp │ │ │ │ │ │ ├── PartialPivLU_solve.cpp │ │ │ │ │ │ ├── PartialRedux_count.cpp │ │ │ │ │ │ ├── PartialRedux_maxCoeff.cpp │ │ │ │ │ │ ├── PartialRedux_minCoeff.cpp │ │ │ │ │ │ ├── PartialRedux_norm.cpp │ │ │ │ │ │ ├── PartialRedux_prod.cpp │ │ │ │ │ │ ├── PartialRedux_squaredNorm.cpp │ │ │ │ │ │ ├── PartialRedux_sum.cpp │ │ │ │ │ │ ├── RealQZ_compute.cpp │ │ │ │ │ │ ├── RealSchur_RealSchur_MatrixType.cpp │ │ │ │ │ │ ├── RealSchur_compute.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_compute_MatrixType.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_compute_MatrixType2.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_eigenvalues.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_eigenvectors.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_operatorInverseSqrt.cpp │ │ │ │ │ │ ├── SelfAdjointEigenSolver_operatorSqrt.cpp │ │ │ │ │ │ ├── SelfAdjointView_eigenvalues.cpp │ │ │ │ │ │ ├── SelfAdjointView_operatorNorm.cpp │ │ │ │ │ │ ├── Slicing_arrayexpr.cpp │ │ │ │ │ │ ├── Slicing_custom_padding_cxx11.cpp │ │ │ │ │ │ ├── Slicing_rawarray_cxx11.cpp │ │ │ │ │ │ ├── Slicing_stdvector_cxx11.cpp │ │ │ │ │ │ ├── SparseMatrix_coeffs.cpp │ │ │ │ │ │ ├── TopicAliasing_block.cpp │ │ │ │ │ │ ├── TopicAliasing_block_correct.cpp │ │ │ │ │ │ ├── TopicAliasing_cwise.cpp │ │ │ │ │ │ ├── TopicAliasing_mult1.cpp │ │ │ │ │ │ ├── TopicAliasing_mult2.cpp │ │ │ │ │ │ ├── TopicAliasing_mult3.cpp │ │ │ │ │ │ ├── TopicAliasing_mult4.cpp │ │ │ 
│ │ │ ├── TopicAliasing_mult5.cpp │ │ │ │ │ │ ├── TopicStorageOrders_example.cpp │ │ │ │ │ │ ├── Triangular_solve.cpp │ │ │ │ │ │ ├── Tridiagonalization_Tridiagonalization_MatrixType.cpp │ │ │ │ │ │ ├── Tridiagonalization_compute.cpp │ │ │ │ │ │ ├── Tridiagonalization_decomposeInPlace.cpp │ │ │ │ │ │ ├── Tridiagonalization_diagonal.cpp │ │ │ │ │ │ ├── Tridiagonalization_householderCoefficients.cpp │ │ │ │ │ │ ├── Tridiagonalization_packedMatrix.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_Block.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_CommaTemporary.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_Join.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_LinSpaced.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_ThreeWays.cpp │ │ │ │ │ │ ├── Tutorial_AdvancedInitialization_Zero.cpp │ │ │ │ │ │ ├── Tutorial_Map_rowmajor.cpp │ │ │ │ │ │ ├── Tutorial_Map_using.cpp │ │ │ │ │ │ ├── Tutorial_ReshapeMat2Mat.cpp │ │ │ │ │ │ ├── Tutorial_ReshapeMat2Vec.cpp │ │ │ │ │ │ ├── Tutorial_SlicingCol.cpp │ │ │ │ │ │ ├── Tutorial_SlicingVec.cpp │ │ │ │ │ │ ├── Tutorial_commainit_01.cpp │ │ │ │ │ │ ├── Tutorial_commainit_01b.cpp │ │ │ │ │ │ ├── Tutorial_commainit_02.cpp │ │ │ │ │ │ ├── Tutorial_range_for_loop_1d_cxx11.cpp │ │ │ │ │ │ ├── Tutorial_range_for_loop_2d_cxx11.cpp │ │ │ │ │ │ ├── Tutorial_reshaped_vs_resize_1.cpp │ │ │ │ │ │ ├── Tutorial_reshaped_vs_resize_2.cpp │ │ │ │ │ │ ├── Tutorial_solve_matrix_inverse.cpp │ │ │ │ │ │ ├── Tutorial_solve_multiple_rhs.cpp │ │ │ │ │ │ ├── Tutorial_solve_reuse_decomposition.cpp │ │ │ │ │ │ ├── Tutorial_solve_singular.cpp │ │ │ │ │ │ ├── Tutorial_solve_triangular.cpp │ │ │ │ │ │ ├── Tutorial_solve_triangular_inplace.cpp │ │ │ │ │ │ ├── Tutorial_std_sort.cpp │ │ │ │ │ │ ├── Tutorial_std_sort_rows_cxx11.cpp │ │ │ │ │ │ ├── VectorwiseOp_homogeneous.cpp │ │ │ │ │ │ ├── Vectorwise_reverse.cpp │ │ │ │ │ │ ├── class_FullPivLU.cpp │ │ │ │ │ │ ├── compile_snippet.cpp.in │ │ │ │ │ │ ├── tut_arithmetic_redux_minmax.cpp │ │ │ │ │ 
│ ├── tut_arithmetic_transpose_aliasing.cpp │ │ │ │ │ │ ├── tut_arithmetic_transpose_conjugate.cpp │ │ │ │ │ │ ├── tut_arithmetic_transpose_inplace.cpp │ │ │ │ │ │ └── tut_matrix_assignment_resizing.cpp │ │ │ │ │ ├── special_examples/ │ │ │ │ │ │ ├── Tutorial_sparse_example.cpp │ │ │ │ │ │ ├── Tutorial_sparse_example_details.cpp │ │ │ │ │ │ └── random_cpp11.cpp │ │ │ │ │ └── tutorial.cpp │ │ │ │ ├── eigen3.pc.in │ │ │ │ ├── failtest/ │ │ │ │ │ ├── bdcsvd_int.cpp │ │ │ │ │ ├── block_nonconst_ctor_on_const_xpr_0.cpp │ │ │ │ │ ├── block_nonconst_ctor_on_const_xpr_1.cpp │ │ │ │ │ ├── block_nonconst_ctor_on_const_xpr_2.cpp │ │ │ │ │ ├── block_on_const_type_actually_const_0.cpp │ │ │ │ │ ├── block_on_const_type_actually_const_1.cpp │ │ │ │ │ ├── colpivqr_int.cpp │ │ │ │ │ ├── const_qualified_block_method_retval_0.cpp │ │ │ │ │ ├── const_qualified_block_method_retval_1.cpp │ │ │ │ │ ├── const_qualified_diagonal_method_retval.cpp │ │ │ │ │ ├── const_qualified_transpose_method_retval.cpp │ │ │ │ │ ├── cwiseunaryview_nonconst_ctor_on_const_xpr.cpp │ │ │ │ │ ├── cwiseunaryview_on_const_type_actually_const.cpp │ │ │ │ │ ├── diagonal_nonconst_ctor_on_const_xpr.cpp │ │ │ │ │ ├── diagonal_on_const_type_actually_const.cpp │ │ │ │ │ ├── eigensolver_cplx.cpp │ │ │ │ │ ├── eigensolver_int.cpp │ │ │ │ │ ├── failtest_sanity_check.cpp │ │ │ │ │ ├── fullpivlu_int.cpp │ │ │ │ │ ├── fullpivqr_int.cpp │ │ │ │ │ ├── initializer_list_1.cpp │ │ │ │ │ ├── initializer_list_2.cpp │ │ │ │ │ ├── jacobisvd_int.cpp │ │ │ │ │ ├── ldlt_int.cpp │ │ │ │ │ ├── llt_int.cpp │ │ │ │ │ ├── map_nonconst_ctor_on_const_ptr_0.cpp │ │ │ │ │ ├── map_nonconst_ctor_on_const_ptr_1.cpp │ │ │ │ │ ├── map_nonconst_ctor_on_const_ptr_2.cpp │ │ │ │ │ ├── map_nonconst_ctor_on_const_ptr_3.cpp │ │ │ │ │ ├── map_nonconst_ctor_on_const_ptr_4.cpp │ │ │ │ │ ├── map_on_const_type_actually_const_0.cpp │ │ │ │ │ ├── map_on_const_type_actually_const_1.cpp │ │ │ │ │ ├── partialpivlu_int.cpp │ │ │ │ │ ├── qr_int.cpp │ │ │ │ │ ├── 
ref_1.cpp │ │ │ │ │ ├── ref_2.cpp │ │ │ │ │ ├── ref_3.cpp │ │ │ │ │ ├── ref_4.cpp │ │ │ │ │ ├── ref_5.cpp │ │ │ │ │ ├── selfadjointview_nonconst_ctor_on_const_xpr.cpp │ │ │ │ │ ├── selfadjointview_on_const_type_actually_const.cpp │ │ │ │ │ ├── sparse_ref_1.cpp │ │ │ │ │ ├── sparse_ref_2.cpp │ │ │ │ │ ├── sparse_ref_3.cpp │ │ │ │ │ ├── sparse_ref_4.cpp │ │ │ │ │ ├── sparse_ref_5.cpp │ │ │ │ │ ├── sparse_storage_mismatch.cpp │ │ │ │ │ ├── swap_1.cpp │ │ │ │ │ ├── swap_2.cpp │ │ │ │ │ ├── ternary_1.cpp │ │ │ │ │ ├── ternary_2.cpp │ │ │ │ │ ├── transpose_nonconst_ctor_on_const_xpr.cpp │ │ │ │ │ ├── transpose_on_const_type_actually_const.cpp │ │ │ │ │ ├── triangularview_nonconst_ctor_on_const_xpr.cpp │ │ │ │ │ └── triangularview_on_const_type_actually_const.cpp │ │ │ │ ├── lapack/ │ │ │ │ │ ├── cholesky.cpp │ │ │ │ │ ├── clacgv.f │ │ │ │ │ ├── cladiv.f │ │ │ │ │ ├── clarf.f │ │ │ │ │ ├── clarfb.f │ │ │ │ │ ├── clarfg.f │ │ │ │ │ ├── clarft.f │ │ │ │ │ ├── complex_double.cpp │ │ │ │ │ ├── complex_single.cpp │ │ │ │ │ ├── dladiv.f │ │ │ │ │ ├── dlamch.f │ │ │ │ │ ├── dlapy2.f │ │ │ │ │ ├── dlapy3.f │ │ │ │ │ ├── dlarf.f │ │ │ │ │ ├── dlarfb.f │ │ │ │ │ ├── dlarfg.f │ │ │ │ │ ├── dlarft.f │ │ │ │ │ ├── double.cpp │ │ │ │ │ ├── dsecnd_NONE.f │ │ │ │ │ ├── eigenvalues.cpp │ │ │ │ │ ├── ilaclc.f │ │ │ │ │ ├── ilaclr.f │ │ │ │ │ ├── iladlc.f │ │ │ │ │ ├── iladlr.f │ │ │ │ │ ├── ilaslc.f │ │ │ │ │ ├── ilaslr.f │ │ │ │ │ ├── ilazlc.f │ │ │ │ │ ├── ilazlr.f │ │ │ │ │ ├── lapack_common.h │ │ │ │ │ ├── lu.cpp │ │ │ │ │ ├── second_NONE.f │ │ │ │ │ ├── single.cpp │ │ │ │ │ ├── sladiv.f │ │ │ │ │ ├── slamch.f │ │ │ │ │ ├── slapy2.f │ │ │ │ │ ├── slapy3.f │ │ │ │ │ ├── slarf.f │ │ │ │ │ ├── slarfb.f │ │ │ │ │ ├── slarfg.f │ │ │ │ │ ├── slarft.f │ │ │ │ │ ├── svd.cpp │ │ │ │ │ ├── zlacgv.f │ │ │ │ │ ├── zladiv.f │ │ │ │ │ ├── zlarf.f │ │ │ │ │ ├── zlarfb.f │ │ │ │ │ ├── zlarfg.f │ │ │ │ │ └── zlarft.f │ │ │ │ ├── scripts/ │ │ │ │ │ ├── cdashtesting.cmake.in │ │ │ │ │ ├── check.in │ │ │ 
│ │ ├── debug.in │ │ │ │ │ ├── eigen_gen_credits.cpp │ │ │ │ │ ├── eigen_gen_docs │ │ │ │ │ ├── eigen_gen_split_test_help.cmake │ │ │ │ │ ├── eigen_monitor_perf.sh │ │ │ │ │ ├── release.in │ │ │ │ │ └── relicense.py │ │ │ │ ├── signature_of_eigen3_matrix_library │ │ │ │ ├── test/ │ │ │ │ │ ├── AnnoyingScalar.h │ │ │ │ │ ├── MovableScalar.h │ │ │ │ │ ├── OffByOneScalar.h │ │ │ │ │ ├── SafeScalar.h │ │ │ │ │ ├── adjoint.cpp │ │ │ │ │ ├── array_cwise.cpp │ │ │ │ │ ├── array_for_matrix.cpp │ │ │ │ │ ├── array_of_string.cpp │ │ │ │ │ ├── array_replicate.cpp │ │ │ │ │ ├── array_reverse.cpp │ │ │ │ │ ├── bandmatrix.cpp │ │ │ │ │ ├── basicstuff.cpp │ │ │ │ │ ├── bdcsvd.cpp │ │ │ │ │ ├── bfloat16_float.cpp │ │ │ │ │ ├── bicgstab.cpp │ │ │ │ │ ├── blasutil.cpp │ │ │ │ │ ├── block.cpp │ │ │ │ │ ├── boostmultiprec.cpp │ │ │ │ │ ├── bug1213.cpp │ │ │ │ │ ├── bug1213.h │ │ │ │ │ ├── bug1213_main.cpp │ │ │ │ │ ├── cholesky.cpp │ │ │ │ │ ├── cholmod_support.cpp │ │ │ │ │ ├── commainitializer.cpp │ │ │ │ │ ├── conjugate_gradient.cpp │ │ │ │ │ ├── conservative_resize.cpp │ │ │ │ │ ├── constructor.cpp │ │ │ │ │ ├── corners.cpp │ │ │ │ │ ├── ctorleak.cpp │ │ │ │ │ ├── denseLM.cpp │ │ │ │ │ ├── dense_storage.cpp │ │ │ │ │ ├── determinant.cpp │ │ │ │ │ ├── diagonal.cpp │ │ │ │ │ ├── diagonal_matrix_variadic_ctor.cpp │ │ │ │ │ ├── diagonalmatrices.cpp │ │ │ │ │ ├── dontalign.cpp │ │ │ │ │ ├── dynalloc.cpp │ │ │ │ │ ├── eigen2support.cpp │ │ │ │ │ ├── eigensolver_complex.cpp │ │ │ │ │ ├── eigensolver_generalized_real.cpp │ │ │ │ │ ├── eigensolver_generic.cpp │ │ │ │ │ ├── eigensolver_selfadjoint.cpp │ │ │ │ │ ├── evaluator_common.h │ │ │ │ │ ├── evaluators.cpp │ │ │ │ │ ├── exceptions.cpp │ │ │ │ │ ├── fastmath.cpp │ │ │ │ │ ├── first_aligned.cpp │ │ │ │ │ ├── geo_alignedbox.cpp │ │ │ │ │ ├── geo_eulerangles.cpp │ │ │ │ │ ├── geo_homogeneous.cpp │ │ │ │ │ ├── geo_hyperplane.cpp │ │ │ │ │ ├── geo_orthomethods.cpp │ │ │ │ │ ├── geo_parametrizedline.cpp │ │ │ │ │ ├── geo_quaternion.cpp │ │ │ 
│ │ ├── geo_transformations.cpp │ │ │ │ │ ├── gpu_basic.cu │ │ │ │ │ ├── gpu_common.h │ │ │ │ │ ├── gpu_example.cu │ │ │ │ │ ├── gpu_test_helper.h │ │ │ │ │ ├── half_float.cpp │ │ │ │ │ ├── hessenberg.cpp │ │ │ │ │ ├── householder.cpp │ │ │ │ │ ├── incomplete_cholesky.cpp │ │ │ │ │ ├── indexed_view.cpp │ │ │ │ │ ├── initializer_list_construction.cpp │ │ │ │ │ ├── inplace_decomposition.cpp │ │ │ │ │ ├── integer_types.cpp │ │ │ │ │ ├── inverse.cpp │ │ │ │ │ ├── io.cpp │ │ │ │ │ ├── is_same_dense.cpp │ │ │ │ │ ├── jacobi.cpp │ │ │ │ │ ├── jacobisvd.cpp │ │ │ │ │ ├── klu_support.cpp │ │ │ │ │ ├── linearstructure.cpp │ │ │ │ │ ├── lscg.cpp │ │ │ │ │ ├── lu.cpp │ │ │ │ │ ├── main.h │ │ │ │ │ ├── mapped_matrix.cpp │ │ │ │ │ ├── mapstaticmethods.cpp │ │ │ │ │ ├── mapstride.cpp │ │ │ │ │ ├── meta.cpp │ │ │ │ │ ├── metis_support.cpp │ │ │ │ │ ├── miscmatrices.cpp │ │ │ │ │ ├── mixingtypes.cpp │ │ │ │ │ ├── mpl2only.cpp │ │ │ │ │ ├── nestbyvalue.cpp │ │ │ │ │ ├── nesting_ops.cpp │ │ │ │ │ ├── nomalloc.cpp │ │ │ │ │ ├── nullary.cpp │ │ │ │ │ ├── num_dimensions.cpp │ │ │ │ │ ├── numext.cpp │ │ │ │ │ ├── packetmath.cpp │ │ │ │ │ ├── packetmath_test_shared.h │ │ │ │ │ ├── pardiso_support.cpp │ │ │ │ │ ├── pastix_support.cpp │ │ │ │ │ ├── permutationmatrices.cpp │ │ │ │ │ ├── prec_inverse_4x4.cpp │ │ │ │ │ ├── product.h │ │ │ │ │ ├── product_extra.cpp │ │ │ │ │ ├── product_large.cpp │ │ │ │ │ ├── product_mmtr.cpp │ │ │ │ │ ├── product_notemporary.cpp │ │ │ │ │ ├── product_selfadjoint.cpp │ │ │ │ │ ├── product_small.cpp │ │ │ │ │ ├── product_symm.cpp │ │ │ │ │ ├── product_syrk.cpp │ │ │ │ │ ├── product_trmm.cpp │ │ │ │ │ ├── product_trmv.cpp │ │ │ │ │ ├── product_trsolve.cpp │ │ │ │ │ ├── qr.cpp │ │ │ │ │ ├── qr_colpivoting.cpp │ │ │ │ │ ├── qr_fullpivoting.cpp │ │ │ │ │ ├── qtvector.cpp │ │ │ │ │ ├── rand.cpp │ │ │ │ │ ├── random_matrix.cpp │ │ │ │ │ ├── random_matrix_helper.h │ │ │ │ │ ├── random_without_cast_overflow.h │ │ │ │ │ ├── real_qz.cpp │ │ │ │ │ ├── redux.cpp │ │ │ │ │ 
├── ref.cpp │ │ │ │ │ ├── reshape.cpp │ │ │ │ │ ├── resize.cpp │ │ │ │ │ ├── rvalue_types.cpp │ │ │ │ │ ├── schur_complex.cpp │ │ │ │ │ ├── schur_real.cpp │ │ │ │ │ ├── selfadjoint.cpp │ │ │ │ │ ├── serializer.cpp │ │ │ │ │ ├── simplicial_cholesky.cpp │ │ │ │ │ ├── sizeof.cpp │ │ │ │ │ ├── sizeoverflow.cpp │ │ │ │ │ ├── smallvectors.cpp │ │ │ │ │ ├── solverbase.h │ │ │ │ │ ├── sparse.h │ │ │ │ │ ├── sparseLM.cpp │ │ │ │ │ ├── sparse_basic.cpp │ │ │ │ │ ├── sparse_block.cpp │ │ │ │ │ ├── sparse_permutations.cpp │ │ │ │ │ ├── sparse_product.cpp │ │ │ │ │ ├── sparse_ref.cpp │ │ │ │ │ ├── sparse_solver.h │ │ │ │ │ ├── sparse_solvers.cpp │ │ │ │ │ ├── sparse_vector.cpp │ │ │ │ │ ├── sparselu.cpp │ │ │ │ │ ├── sparseqr.cpp │ │ │ │ │ ├── special_numbers.cpp │ │ │ │ │ ├── split_test_helper.h │ │ │ │ │ ├── spqr_support.cpp │ │ │ │ │ ├── stable_norm.cpp │ │ │ │ │ ├── stddeque.cpp │ │ │ │ │ ├── stddeque_overload.cpp │ │ │ │ │ ├── stdlist.cpp │ │ │ │ │ ├── stdlist_overload.cpp │ │ │ │ │ ├── stdvector.cpp │ │ │ │ │ ├── stdvector_overload.cpp │ │ │ │ │ ├── stl_iterators.cpp │ │ │ │ │ ├── superlu_support.cpp │ │ │ │ │ ├── svd_common.h │ │ │ │ │ ├── svd_fill.h │ │ │ │ │ ├── swap.cpp │ │ │ │ │ ├── symbolic_index.cpp │ │ │ │ │ ├── triangular.cpp │ │ │ │ │ ├── tuple_test.cpp │ │ │ │ │ ├── type_alias.cpp │ │ │ │ │ ├── umeyama.cpp │ │ │ │ │ ├── umfpack_support.cpp │ │ │ │ │ ├── unalignedcount.cpp │ │ │ │ │ ├── upperbidiagonalization.cpp │ │ │ │ │ ├── vectorization_logic.cpp │ │ │ │ │ ├── vectorwiseop.cpp │ │ │ │ │ ├── visitor.cpp │ │ │ │ │ └── zerosized.cpp │ │ │ │ └── unsupported/ │ │ │ │ ├── Eigen/ │ │ │ │ │ ├── AdolcForward │ │ │ │ │ ├── AlignedVector3 │ │ │ │ │ ├── ArpackSupport │ │ │ │ │ ├── AutoDiff │ │ │ │ │ ├── BVH │ │ │ │ │ ├── CXX11/ │ │ │ │ │ │ ├── Tensor │ │ │ │ │ │ ├── TensorSymmetry │ │ │ │ │ │ ├── ThreadPool │ │ │ │ │ │ └── src/ │ │ │ │ │ │ ├── Tensor/ │ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ │ ├── Tensor.h │ │ │ │ │ │ │ ├── 
TensorArgMax.h │ │ │ │ │ │ │ ├── TensorAssign.h │ │ │ │ │ │ │ ├── TensorBase.h │ │ │ │ │ │ │ ├── TensorBlock.h │ │ │ │ │ │ │ ├── TensorBroadcasting.h │ │ │ │ │ │ │ ├── TensorChipping.h │ │ │ │ │ │ │ ├── TensorConcatenation.h │ │ │ │ │ │ │ ├── TensorContraction.h │ │ │ │ │ │ │ ├── TensorContractionBlocking.h │ │ │ │ │ │ │ ├── TensorContractionCuda.h │ │ │ │ │ │ │ ├── TensorContractionGpu.h │ │ │ │ │ │ │ ├── TensorContractionMapper.h │ │ │ │ │ │ │ ├── TensorContractionSycl.h │ │ │ │ │ │ │ ├── TensorContractionThreadPool.h │ │ │ │ │ │ │ ├── TensorConversion.h │ │ │ │ │ │ │ ├── TensorConvolution.h │ │ │ │ │ │ │ ├── TensorConvolutionSycl.h │ │ │ │ │ │ │ ├── TensorCostModel.h │ │ │ │ │ │ │ ├── TensorCustomOp.h │ │ │ │ │ │ │ ├── TensorDevice.h │ │ │ │ │ │ │ ├── TensorDeviceCuda.h │ │ │ │ │ │ │ ├── TensorDeviceDefault.h │ │ │ │ │ │ │ ├── TensorDeviceGpu.h │ │ │ │ │ │ │ ├── TensorDeviceSycl.h │ │ │ │ │ │ │ ├── TensorDeviceThreadPool.h │ │ │ │ │ │ │ ├── TensorDimensionList.h │ │ │ │ │ │ │ ├── TensorDimensions.h │ │ │ │ │ │ │ ├── TensorEvalTo.h │ │ │ │ │ │ │ ├── TensorEvaluator.h │ │ │ │ │ │ │ ├── TensorExecutor.h │ │ │ │ │ │ │ ├── TensorExpr.h │ │ │ │ │ │ │ ├── TensorFFT.h │ │ │ │ │ │ │ ├── TensorFixedSize.h │ │ │ │ │ │ │ ├── TensorForcedEval.h │ │ │ │ │ │ │ ├── TensorForwardDeclarations.h │ │ │ │ │ │ │ ├── TensorFunctors.h │ │ │ │ │ │ │ ├── TensorGenerator.h │ │ │ │ │ │ │ ├── TensorGlobalFunctions.h │ │ │ │ │ │ │ ├── TensorGpuHipCudaDefines.h │ │ │ │ │ │ │ ├── TensorGpuHipCudaUndefines.h │ │ │ │ │ │ │ ├── TensorIO.h │ │ │ │ │ │ │ ├── TensorImagePatch.h │ │ │ │ │ │ │ ├── TensorIndexList.h │ │ │ │ │ │ │ ├── TensorInflation.h │ │ │ │ │ │ │ ├── TensorInitializer.h │ │ │ │ │ │ │ ├── TensorIntDiv.h │ │ │ │ │ │ │ ├── TensorLayoutSwap.h │ │ │ │ │ │ │ ├── TensorMacros.h │ │ │ │ │ │ │ ├── TensorMap.h │ │ │ │ │ │ │ ├── TensorMeta.h │ │ │ │ │ │ │ ├── TensorMorphing.h │ │ │ │ │ │ │ ├── TensorPadding.h │ │ │ │ │ │ │ ├── TensorPatch.h │ │ │ │ │ │ │ ├── TensorRandom.h │ │ │ │ │ │ │ ├── 
TensorReduction.h │ │ │ │ │ │ │ ├── TensorReductionGpu.h │ │ │ │ │ │ │ ├── TensorReductionSycl.h │ │ │ │ │ │ │ ├── TensorRef.h │ │ │ │ │ │ │ ├── TensorReverse.h │ │ │ │ │ │ │ ├── TensorScan.h │ │ │ │ │ │ │ ├── TensorScanSycl.h │ │ │ │ │ │ │ ├── TensorShuffling.h │ │ │ │ │ │ │ ├── TensorStorage.h │ │ │ │ │ │ │ ├── TensorStriding.h │ │ │ │ │ │ │ ├── TensorTrace.h │ │ │ │ │ │ │ ├── TensorTraits.h │ │ │ │ │ │ │ ├── TensorUInt128.h │ │ │ │ │ │ │ └── TensorVolumePatch.h │ │ │ │ │ │ ├── TensorSymmetry/ │ │ │ │ │ │ │ ├── DynamicSymmetry.h │ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ │ ├── StaticSymmetry.h │ │ │ │ │ │ │ ├── Symmetry.h │ │ │ │ │ │ │ └── util/ │ │ │ │ │ │ │ └── TemplateGroupTheory.h │ │ │ │ │ │ ├── ThreadPool/ │ │ │ │ │ │ │ ├── Barrier.h │ │ │ │ │ │ │ ├── EventCount.h │ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ │ ├── NonBlockingThreadPool.h │ │ │ │ │ │ │ ├── RunQueue.h │ │ │ │ │ │ │ ├── ThreadCancel.h │ │ │ │ │ │ │ ├── ThreadEnvironment.h │ │ │ │ │ │ │ ├── ThreadLocal.h │ │ │ │ │ │ │ ├── ThreadPoolInterface.h │ │ │ │ │ │ │ └── ThreadYield.h │ │ │ │ │ │ └── util/ │ │ │ │ │ │ ├── CXX11Meta.h │ │ │ │ │ │ ├── CXX11Workarounds.h │ │ │ │ │ │ ├── EmulateArray.h │ │ │ │ │ │ └── MaxSizeVector.h │ │ │ │ │ ├── EulerAngles │ │ │ │ │ ├── FFT │ │ │ │ │ ├── IterativeSolvers │ │ │ │ │ ├── KroneckerProduct │ │ │ │ │ ├── LevenbergMarquardt │ │ │ │ │ ├── MPRealSupport │ │ │ │ │ ├── MatrixFunctions │ │ │ │ │ ├── MoreVectorization │ │ │ │ │ ├── NonLinearOptimization │ │ │ │ │ ├── NumericalDiff │ │ │ │ │ ├── OpenGLSupport │ │ │ │ │ ├── Polynomials │ │ │ │ │ ├── Skyline │ │ │ │ │ ├── SparseExtra │ │ │ │ │ ├── SpecialFunctions │ │ │ │ │ ├── Splines │ │ │ │ │ └── src/ │ │ │ │ │ ├── AutoDiff/ │ │ │ │ │ │ ├── AutoDiffJacobian.h │ │ │ │ │ │ ├── AutoDiffScalar.h │ │ │ │ │ │ ├── AutoDiffVector.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── BVH/ │ │ │ │ │ │ ├── BVAlgorithms.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── KdBVH.h │ │ │ │ │ ├── Eigenvalues/ │ │ 
│ │ │ │ ├── ArpackSelfAdjointEigenSolver.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── EulerAngles/ │ │ │ │ │ │ ├── EulerAngles.h │ │ │ │ │ │ ├── EulerSystem.h │ │ │ │ │ │ └── InternalHeaderCheck.h │ │ │ │ │ ├── FFT/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── ei_fftw_impl.h │ │ │ │ │ │ └── ei_kissfft_impl.h │ │ │ │ │ ├── IterativeSolvers/ │ │ │ │ │ │ ├── ConstrainedConjGrad.h │ │ │ │ │ │ ├── DGMRES.h │ │ │ │ │ │ ├── GMRES.h │ │ │ │ │ │ ├── IDRS.h │ │ │ │ │ │ ├── IncompleteLU.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── IterationController.h │ │ │ │ │ │ ├── MINRES.h │ │ │ │ │ │ └── Scaling.h │ │ │ │ │ ├── KroneckerProduct/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── KroneckerTensorProduct.h │ │ │ │ │ ├── LevenbergMarquardt/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── LMcovar.h │ │ │ │ │ │ ├── LMonestep.h │ │ │ │ │ │ ├── LMpar.h │ │ │ │ │ │ ├── LMqrsolv.h │ │ │ │ │ │ └── LevenbergMarquardt.h │ │ │ │ │ ├── MatrixFunctions/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── MatrixExponential.h │ │ │ │ │ │ ├── MatrixFunction.h │ │ │ │ │ │ ├── MatrixLogarithm.h │ │ │ │ │ │ ├── MatrixPower.h │ │ │ │ │ │ ├── MatrixSquareRoot.h │ │ │ │ │ │ └── StemFunction.h │ │ │ │ │ ├── MoreVectorization/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── MathFunctions.h │ │ │ │ │ ├── NonLinearOptimization/ │ │ │ │ │ │ ├── HybridNonLinearSolver.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── LevenbergMarquardt.h │ │ │ │ │ │ ├── chkder.h │ │ │ │ │ │ ├── covar.h │ │ │ │ │ │ ├── dogleg.h │ │ │ │ │ │ ├── fdjac1.h │ │ │ │ │ │ ├── lmpar.h │ │ │ │ │ │ ├── qrsolv.h │ │ │ │ │ │ ├── r1mpyq.h │ │ │ │ │ │ ├── r1updt.h │ │ │ │ │ │ └── rwupdt.h │ │ │ │ │ ├── NumericalDiff/ │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ └── NumericalDiff.h │ │ │ │ │ ├── Polynomials/ │ │ │ │ │ │ ├── Companion.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── PolynomialSolver.h │ │ │ │ │ │ └── PolynomialUtils.h │ │ │ │ │ ├── Skyline/ │ │ │ │ │ │ ├── 
InternalHeaderCheck.h │ │ │ │ │ │ ├── SkylineInplaceLU.h │ │ │ │ │ │ ├── SkylineMatrix.h │ │ │ │ │ │ ├── SkylineMatrixBase.h │ │ │ │ │ │ ├── SkylineProduct.h │ │ │ │ │ │ ├── SkylineStorage.h │ │ │ │ │ │ └── SkylineUtil.h │ │ │ │ │ ├── SparseExtra/ │ │ │ │ │ │ ├── BlockSparseMatrix.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── MarketIO.h │ │ │ │ │ │ ├── MatrixMarketIterator.h │ │ │ │ │ │ └── RandomSetter.h │ │ │ │ │ ├── SpecialFunctions/ │ │ │ │ │ │ ├── BesselFunctionsArrayAPI.h │ │ │ │ │ │ ├── BesselFunctionsBFloat16.h │ │ │ │ │ │ ├── BesselFunctionsFunctors.h │ │ │ │ │ │ ├── BesselFunctionsHalf.h │ │ │ │ │ │ ├── BesselFunctionsImpl.h │ │ │ │ │ │ ├── BesselFunctionsPacketMath.h │ │ │ │ │ │ ├── HipVectorCompatibility.h │ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ │ ├── SpecialFunctionsArrayAPI.h │ │ │ │ │ │ ├── SpecialFunctionsBFloat16.h │ │ │ │ │ │ ├── SpecialFunctionsFunctors.h │ │ │ │ │ │ ├── SpecialFunctionsHalf.h │ │ │ │ │ │ ├── SpecialFunctionsImpl.h │ │ │ │ │ │ ├── SpecialFunctionsPacketMath.h │ │ │ │ │ │ └── arch/ │ │ │ │ │ │ ├── AVX/ │ │ │ │ │ │ │ ├── BesselFunctions.h │ │ │ │ │ │ │ └── SpecialFunctions.h │ │ │ │ │ │ ├── AVX512/ │ │ │ │ │ │ │ ├── BesselFunctions.h │ │ │ │ │ │ │ └── SpecialFunctions.h │ │ │ │ │ │ ├── GPU/ │ │ │ │ │ │ │ └── SpecialFunctions.h │ │ │ │ │ │ └── NEON/ │ │ │ │ │ │ ├── BesselFunctions.h │ │ │ │ │ │ └── SpecialFunctions.h │ │ │ │ │ └── Splines/ │ │ │ │ │ ├── InternalHeaderCheck.h │ │ │ │ │ ├── Spline.h │ │ │ │ │ ├── SplineFitting.h │ │ │ │ │ └── SplineFwd.h │ │ │ │ ├── bench/ │ │ │ │ │ └── bench_svd.cpp │ │ │ │ ├── doc/ │ │ │ │ │ ├── Overview.dox │ │ │ │ │ ├── SYCL.dox │ │ │ │ │ ├── eigendoxy_layout.xml.in │ │ │ │ │ └── examples/ │ │ │ │ │ ├── BVH_Example.cpp │ │ │ │ │ ├── EulerAngles.cpp │ │ │ │ │ ├── FFT.cpp │ │ │ │ │ ├── MatrixExponential.cpp │ │ │ │ │ ├── MatrixFunction.cpp │ │ │ │ │ ├── MatrixLogarithm.cpp │ │ │ │ │ ├── MatrixPower.cpp │ │ │ │ │ ├── MatrixPower_optimal.cpp │ │ │ │ │ ├── MatrixSine.cpp │ │ │ │ │ ├── 
MatrixSinh.cpp │ │ │ │ │ ├── MatrixSquareRoot.cpp │ │ │ │ │ ├── PolynomialSolver1.cpp │ │ │ │ │ ├── PolynomialUtils1.cpp │ │ │ │ │ └── SYCL/ │ │ │ │ │ └── CwiseMul.cpp │ │ │ │ └── test/ │ │ │ │ ├── BVH.cpp │ │ │ │ ├── EulerAngles.cpp │ │ │ │ ├── FFT.cpp │ │ │ │ ├── FFTW.cpp │ │ │ │ ├── NonLinearOptimization.cpp │ │ │ │ ├── NumericalDiff.cpp │ │ │ │ ├── alignedvector3.cpp │ │ │ │ ├── autodiff.cpp │ │ │ │ ├── autodiff_scalar.cpp │ │ │ │ ├── bessel_functions.cpp │ │ │ │ ├── cxx11_eventcount.cpp │ │ │ │ ├── cxx11_maxsizevector.cpp │ │ │ │ ├── cxx11_meta.cpp │ │ │ │ ├── cxx11_non_blocking_thread_pool.cpp │ │ │ │ ├── cxx11_runqueue.cpp │ │ │ │ ├── cxx11_tensor_argmax.cpp │ │ │ │ ├── cxx11_tensor_argmax_gpu.cu │ │ │ │ ├── cxx11_tensor_argmax_sycl.cpp │ │ │ │ ├── cxx11_tensor_assign.cpp │ │ │ │ ├── cxx11_tensor_block_access.cpp │ │ │ │ ├── cxx11_tensor_block_eval.cpp │ │ │ │ ├── cxx11_tensor_block_io.cpp │ │ │ │ ├── cxx11_tensor_broadcast_sycl.cpp │ │ │ │ ├── cxx11_tensor_broadcasting.cpp │ │ │ │ ├── cxx11_tensor_builtins_sycl.cpp │ │ │ │ ├── cxx11_tensor_cast_float16_gpu.cu │ │ │ │ ├── cxx11_tensor_casts.cpp │ │ │ │ ├── cxx11_tensor_chipping.cpp │ │ │ │ ├── cxx11_tensor_chipping_sycl.cpp │ │ │ │ ├── cxx11_tensor_comparisons.cpp │ │ │ │ ├── cxx11_tensor_complex_cwise_ops_gpu.cu │ │ │ │ ├── cxx11_tensor_complex_gpu.cu │ │ │ │ ├── cxx11_tensor_concatenation.cpp │ │ │ │ ├── cxx11_tensor_concatenation_sycl.cpp │ │ │ │ ├── cxx11_tensor_const.cpp │ │ │ │ ├── cxx11_tensor_contract_gpu.cu │ │ │ │ ├── cxx11_tensor_contract_sycl.cpp │ │ │ │ ├── cxx11_tensor_contraction.cpp │ │ │ │ ├── cxx11_tensor_convolution.cpp │ │ │ │ ├── cxx11_tensor_convolution_sycl.cpp │ │ │ │ ├── cxx11_tensor_custom_index.cpp │ │ │ │ ├── cxx11_tensor_custom_op.cpp │ │ │ │ ├── cxx11_tensor_custom_op_sycl.cpp │ │ │ │ ├── cxx11_tensor_device.cu │ │ │ │ ├── cxx11_tensor_device_sycl.cpp │ │ │ │ ├── cxx11_tensor_dimension.cpp │ │ │ │ ├── cxx11_tensor_empty.cpp │ │ │ │ ├── cxx11_tensor_executor.cpp │ │ │ │ ├── 
cxx11_tensor_expr.cpp │ │ │ │ ├── cxx11_tensor_fft.cpp │ │ │ │ ├── cxx11_tensor_fixed_size.cpp │ │ │ │ ├── cxx11_tensor_forced_eval.cpp │ │ │ │ ├── cxx11_tensor_forced_eval_sycl.cpp │ │ │ │ ├── cxx11_tensor_generator.cpp │ │ │ │ ├── cxx11_tensor_generator_sycl.cpp │ │ │ │ ├── cxx11_tensor_gpu.cu │ │ │ │ ├── cxx11_tensor_ifft.cpp │ │ │ │ ├── cxx11_tensor_image_op_sycl.cpp │ │ │ │ ├── cxx11_tensor_image_patch.cpp │ │ │ │ ├── cxx11_tensor_image_patch_sycl.cpp │ │ │ │ ├── cxx11_tensor_index_list.cpp │ │ │ │ ├── cxx11_tensor_inflation.cpp │ │ │ │ ├── cxx11_tensor_inflation_sycl.cpp │ │ │ │ ├── cxx11_tensor_intdiv.cpp │ │ │ │ ├── cxx11_tensor_io.cpp │ │ │ │ ├── cxx11_tensor_layout_swap.cpp │ │ │ │ ├── cxx11_tensor_layout_swap_sycl.cpp │ │ │ │ ├── cxx11_tensor_lvalue.cpp │ │ │ │ ├── cxx11_tensor_map.cpp │ │ │ │ ├── cxx11_tensor_math.cpp │ │ │ │ ├── cxx11_tensor_math_sycl.cpp │ │ │ │ ├── cxx11_tensor_mixed_indices.cpp │ │ │ │ ├── cxx11_tensor_morphing.cpp │ │ │ │ ├── cxx11_tensor_morphing_sycl.cpp │ │ │ │ ├── cxx11_tensor_move.cpp │ │ │ │ ├── cxx11_tensor_notification.cpp │ │ │ │ ├── cxx11_tensor_of_bfloat16_gpu.cu │ │ │ │ ├── cxx11_tensor_of_complex.cpp │ │ │ │ ├── cxx11_tensor_of_const_values.cpp │ │ │ │ ├── cxx11_tensor_of_float16_gpu.cu │ │ │ │ ├── cxx11_tensor_of_strings.cpp │ │ │ │ ├── cxx11_tensor_padding.cpp │ │ │ │ ├── cxx11_tensor_padding_sycl.cpp │ │ │ │ ├── cxx11_tensor_patch.cpp │ │ │ │ ├── cxx11_tensor_patch_sycl.cpp │ │ │ │ ├── cxx11_tensor_random.cpp │ │ │ │ ├── cxx11_tensor_random_gpu.cu │ │ │ │ ├── cxx11_tensor_random_sycl.cpp │ │ │ │ ├── cxx11_tensor_reduction.cpp │ │ │ │ ├── cxx11_tensor_reduction_gpu.cu │ │ │ │ ├── cxx11_tensor_reduction_sycl.cpp │ │ │ │ ├── cxx11_tensor_ref.cpp │ │ │ │ ├── cxx11_tensor_reverse.cpp │ │ │ │ ├── cxx11_tensor_reverse_sycl.cpp │ │ │ │ ├── cxx11_tensor_roundings.cpp │ │ │ │ ├── cxx11_tensor_scan.cpp │ │ │ │ ├── cxx11_tensor_scan_gpu.cu │ │ │ │ ├── cxx11_tensor_scan_sycl.cpp │ │ │ │ ├── cxx11_tensor_shuffling.cpp │ │ │ │ ├── 
cxx11_tensor_shuffling_sycl.cpp │ │ │ │ ├── cxx11_tensor_simple.cpp │ │ │ │ ├── cxx11_tensor_striding.cpp │ │ │ │ ├── cxx11_tensor_striding_sycl.cpp │ │ │ │ ├── cxx11_tensor_sugar.cpp │ │ │ │ ├── cxx11_tensor_sycl.cpp │ │ │ │ ├── cxx11_tensor_symmetry.cpp │ │ │ │ ├── cxx11_tensor_thread_local.cpp │ │ │ │ ├── cxx11_tensor_thread_pool.cpp │ │ │ │ ├── cxx11_tensor_trace.cpp │ │ │ │ ├── cxx11_tensor_uint128.cpp │ │ │ │ ├── cxx11_tensor_volume_patch.cpp │ │ │ │ ├── cxx11_tensor_volume_patch_sycl.cpp │ │ │ │ ├── dgmres.cpp │ │ │ │ ├── forward_adolc.cpp │ │ │ │ ├── gmres.cpp │ │ │ │ ├── idrs.cpp │ │ │ │ ├── kronecker_product.cpp │ │ │ │ ├── levenberg_marquardt.cpp │ │ │ │ ├── matrix_exponential.cpp │ │ │ │ ├── matrix_function.cpp │ │ │ │ ├── matrix_functions.h │ │ │ │ ├── matrix_power.cpp │ │ │ │ ├── matrix_square_root.cpp │ │ │ │ ├── minres.cpp │ │ │ │ ├── mpreal_support.cpp │ │ │ │ ├── openglsupport.cpp │ │ │ │ ├── polynomialsolver.cpp │ │ │ │ ├── polynomialutils.cpp │ │ │ │ ├── sparse_extra.cpp │ │ │ │ ├── special_functions.cpp │ │ │ │ ├── special_packetmath.cpp │ │ │ │ └── splines.cpp │ │ │ └── pcg32/ │ │ │ └── pcg32.h │ │ ├── pybind_api.h │ │ ├── ray_sampler_header.h │ │ └── raymarch_shared.h │ ├── setup.py │ └── src/ │ ├── calc_rgb.cu │ ├── compacted_coord.cu │ ├── ema_grid_samples_nerf.cu │ ├── generate_grid_samples_nerf_nonuniform.cu │ ├── mark_untrained_density_grid.cu │ ├── pybind_api.cu │ ├── ray_sampler.cu │ ├── splat_grid_samples_nerf_max_nearest_neighbor.cu │ └── update_bitfield.cu ├── requirements.txt ├── run_nerf.py ├── setup.py ├── test/ │ ├── apis/ │ │ └── test_helper.py │ ├── datasets/ │ │ ├── data/ │ │ │ └── nerf_synthetic/ │ │ │ └── lego/ │ │ │ ├── transforms_test.json │ │ │ ├── transforms_train.json │ │ │ └── transforms_val.json │ │ ├── test_dataset.py │ │ ├── test_load.py │ │ └── test_pipeline.py │ └── models/ │ ├── animatable_nerf/ │ │ ├── test_an_network.py │ │ ├── test_an_render.py │ │ ├── test_deform_mlps.py │ │ └── test_human_mlps.py │ ├── gnr/ 
│ │ ├── test_gnr_mlps.py │ │ └── test_gnr_network.py │ ├── hashnerf/ │ │ └── test_hashnerf_network.py │ ├── mipnerf/ │ │ └── test_mipnerf_network.py │ ├── nerf/ │ │ ├── test_nerf_embedder.py │ │ ├── test_nerf_mlps.py │ │ ├── test_nerf_network.py │ │ └── test_nerf_render.py │ └── neuralbody/ │ ├── test_nb_embedder.py │ ├── test_nb_mlps.py │ ├── test_nb_network.py │ └── test_nb_render.py ├── tools/ │ └── convert_blender_data.py ├── train.sh ├── train_mvs.sh └── xrnerf/ ├── core/ │ ├── __init__.py │ ├── apis/ │ │ ├── __init__.py │ │ ├── api.py │ │ ├── helper.py │ │ ├── test.py │ │ └── train.py │ ├── hooks/ │ │ ├── __init__.py │ │ ├── build_occupancy_tree_hook.py │ │ ├── distill_cycle_hook.py │ │ ├── hash_hook.py │ │ ├── save_distill_results_hook.py │ │ ├── test_hooks.py │ │ ├── train_hooks.py │ │ ├── utils.py │ │ └── validation_hooks.py │ └── runner/ │ ├── __init__.py │ ├── base.py │ ├── bungeenerf_runner.py │ └── kilonerf_runner.py ├── datasets/ │ ├── __init__.py │ ├── aninerf_dataset.py │ ├── base.py │ ├── builder.py │ ├── bungee_dataset.py │ ├── genebody_dataset.py │ ├── hashnerf_dataset.py │ ├── kilonerf_dataset.py │ ├── kilonerf_node_dataset.py │ ├── load_data/ │ │ ├── __init__.py │ │ ├── get_rays.py │ │ ├── load.py │ │ ├── load_LINEMOD.py │ │ ├── load_blender.py │ │ ├── load_deepvoxels.py │ │ ├── load_llff.py │ │ ├── load_multiscale.py │ │ ├── load_multiscale_google.py │ │ └── load_nsvf_dataset.py │ ├── mip_multiscale_dataset.py │ ├── neuralbody_dataset.py │ ├── pipelines/ │ │ ├── __init__.py │ │ ├── augment.py │ │ ├── compose.py │ │ ├── create.py │ │ └── transforms.py │ ├── samplers/ │ │ ├── __init__.py │ │ └── distributed_sampler.py │ ├── scene_dataset.py │ └── utils/ │ ├── __init__.py │ ├── aninerf.py │ ├── flatten.py │ ├── genebody.py │ ├── hashnerf.py │ └── novel_view.py ├── models/ │ ├── __init__.py │ ├── builder.py │ ├── embedders/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── bungee_embedder.py │ │ ├── gnr_embedder.py │ │ ├── kilonerf_fourier_embedder.py │ 
│ ├── mipnerf_embedder.py │ │ └── neuralbody_embedder.py │ ├── mlps/ │ │ ├── __init__.py │ │ ├── aninerf_mlp.py │ │ ├── base.py │ │ ├── bungeenerf_mlp.py │ │ ├── gnr_mlp.py │ │ ├── hashnerf_mlp.py │ │ ├── kilonerf_mlp.py │ │ ├── kilonerf_multinet.py │ │ ├── multi_modules.py │ │ ├── nb_mlp.py │ │ └── nerf_mlp.py │ ├── networks/ │ │ ├── __init__.py │ │ ├── aninerf.py │ │ ├── base.py │ │ ├── bungeenerf.py │ │ ├── gnr.py │ │ ├── hashnerf.py │ │ ├── kilonerf.py │ │ ├── mipnerf.py │ │ ├── nerf.py │ │ ├── neuralbody.py │ │ ├── student_nerf.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── aninerf.py │ │ ├── batching.py │ │ ├── gnr.py │ │ ├── hierarchical_sample.py │ │ ├── metrics.py │ │ ├── mip.py │ │ └── transforms.py │ ├── renders/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── bungeenerf_render.py │ │ ├── gnr_render.py │ │ ├── hashnerf_render.py │ │ ├── kilonerf_simple_render.py │ │ ├── mipnerf_render.py │ │ └── nerf_render.py │ └── samplers/ │ ├── __init__.py │ ├── ngp_grid_sampler.py │ └── utils/ │ ├── __init__.py │ ├── compacted_coords.py │ ├── ema_grid_samples_nerf.py │ ├── generate_grid_samples_nerf_nonuniform.py │ ├── mark_untrained_density_grid.py │ ├── rays_sampler.py │ ├── splat_grid_samples_nerf_max_nearest_neighbor.py │ └── update_bitfield.py └── utils/ ├── __init__.py ├── data_helper.py └── logger.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/build.yml ================================================ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: build on: push: # 触发条件之一为push到main分支,若改动仅存在于docs目录,或README.md文件,则忽略,避免触发。 branches: - main - alpha_test paths-ignore: - 'README.md' - 'README_CN.md' - 'docs/**' pull_request: # 
触发条件之一为该commit属于某个PR,忽略条件同上。 paths-ignore: - 'README.md' - 'README_CN.md' - 'docs/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build_test: runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 - name: Set up Python 3.7 uses: actions/setup-python@v2 with: python-version: 3.7 - name: Install Env run: | pip install coverage pytest pip install torch==1.10.0 # pip install lpips trimesh smplx -i https://pypi.tuna.tsinghua.edu.cn/simple # pip install torch numpy mmcv -i https://pypi.tuna.tsinghua.edu.cn/simple # pip install opencv-python>=3 yapf imageio scikit-image -i https://pypi.tuna.tsinghua.edu.cn/simple coverage run --source xrnerf/models -m pytest -s test/models coverage xml coverage report -m - name: Upload coverage to Codecov # 上传覆盖率报告 uses: codecov/codecov-action@v2 with: files: ./coverage.xml flags: unittests env_vars: OS,PYTHON name: codecov-umbrella fail_ci_if_error: false ================================================ FILE: .github/workflows/lint.yml ================================================ name: lint on: [push, pull_request] concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: lint: runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 - name: Set up Python 3.7 uses: actions/setup-python@v2 with: python-version: 3.7 - name: Install pre-commit hook run: | sudo apt-add-repository ppa:brightbox/ruby-ng -y sudo apt-get update sudo apt-get install -y ruby2.7 pip install pre-commit pre-commit install - name: Linting run: pre-commit run --files xrnerf/* - name: Check docstring coverage run: | pip install interrogate interrogate -vinmMI --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" -f 60 xrnerf/core ================================================ FILE: .gitignore ================================================ /__pycache__/ /data/ /scripts/ /work_dirs/ /build/ sftp-config.json push.sh *.pyc *.log *.egg *.egg-info 
*.so *.o *.mp4 /data/ .coverage /.pytest_cache/ ================================================ FILE: .pre-commit-config.yaml ================================================ exclude: ^tests/data/ repos: - repo: https://github.com/pycqa/flake8.git rev: 3.8.3 hooks: - id: flake8 - repo: https://github.com/LOTEAT/isort rev: 5.10.1 hooks: - id: isort - repo: https://github.com/pre-commit/mirrors-yapf rev: v0.30.0 hooks: - id: yapf - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.1.0 hooks: - id: trailing-whitespace - id: check-yaml - id: end-of-file-fixer - id: requirements-txt-fixer - id: double-quote-string-fixer - id: check-merge-conflict - id: fix-encoding-pragma args: ["--remove"] - id: mixed-line-ending args: ["--fix=lf"] - repo: https://github.com/myint/docformatter rev: v1.3.1 hooks: - id: docformatter args: ["--in-place", "--wrap-descriptions", "79"] - repo: https://github.com/codespell-project/codespell rev: v2.1.0 hooks: - id: codespell args: ["--skip", "*.ipynb,tools/data/hvu/label_map.json", "-L", "te,nd,thre,Gool,gool"] - repo: https://github.com/open-mmlab/pre-commit-hooks rev: v0.2.0 # Use the ref you want to point at hooks: - id: check-algo-readme - id: check-copyright args: ["mmaction", "tests", "demo", "tools"] # these directories will be checked ================================================ FILE: LICENSE ================================================ Copyright 2022 XRNerf Authors. All rights reserved. Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2022 XRNerf Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ # XRNeRF
[![actions](https://github.com/openxrlab/xrnerf/workflows/build/badge.svg)](https://github.com/openxrlab/xrnerf/actions) [![LICENSE](https://img.shields.io/github/license/openxrlab/xrnerf.svg)](https://github.com/openxrlab/xrnerf/blob/main/LICENSE)
## Introduction English | [简体中文](README_CN.md) XRNeRF is an open-source PyTorch-based codebase for Neural Radiance Field (NeRF). It is a part of the [OpenXRLab](https://github.com/orgs/openxrlab/repositories) project. https://user-images.githubusercontent.com/24294293/187131048-5977c929-e136-4328-ad1f-7da8e7a566ff.mp4 This page provides basic tutorials about the usage of XRNeRF. For installation instructions, please see [installation.md](docs/en/installation.md). - [XRNeRF](#xrnerf) - [Introduction](#introduction) - [Benchmark](#benchmark) - [Datasets](#datasets) - [Installation](#installation) - [Build a Model](#build-a-model) - [Basic Concepts](#basic-concepts) - [Write a new network](#write-a-new-network) - [Train a Model](#train-a-model) - [Iteration Controls](#iteration-controls) - [Train](#train) - [Test](#test) - [Tutorials](#tutorials) - [Other Documents](#other-documents) - [Citation](#citation) - [License](#license) - [Contributing](#contributing) - [Acknowledgement](#acknowledgement) - [Projects in OpenXRLab](#projects-in-openxrlab) ## Benchmark More details can be found in [benchmark.md](docs/en/benchmark.md). Supported scene-NeRF methods:
(click to collapse) - [X] [NeRF](https://www.matthewtancik.com/nerf) (ECCV'2020) - [X] [Mip-NeRF](https://jonbarron.info/mipnerf/) (ICCV'2021) - [X] [KiloNeRF](https://arxiv.org/abs/2103.13744) (ICCV'2021) - [X] [Instant NGP](https://nvlabs.github.io/instant-ngp/) (SIGGRAPH'2022) - [X] [BungeeNeRF](https://city-super.github.io/citynerf/) (ECCV'2022) Supported human-NeRF methods:
(click to collapse) - [X] [NeuralBody](https://zju3dv.github.io/neuralbody) (CVPR'2021) - [X] [AniNeRF](https://zju3dv.github.io/animatable_nerf/) (ICCV'2021) - [X] [GNR](https://generalizable-neural-performer.github.io/) Wanna see more methods supported? Post method you want see in XRNeRF on our [wishlist](https://github.com/openxrlab/xrnerf/discussions/11).
## Datasets It is recommended to symlink the dataset root to `$PROJECT/data`. If your folder structure is different, you may need to change the corresponding paths in config files. ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... ``` For more information on data preparation, please see [dataset_preparation.md](docs/en/dataset_preparation.md) ## Installation We provide detailed [installation tutorial](docs/en/installation.md) for XRNeRF, users can install from scratch or use provided [dockerfile](docker/Dockerfile). It is recommended to start by creating a docker image: ```shell docker build -f ./docker/Dockerfile --rm -t xrnerf . ``` For more information, please follow our [installation tutorial](docs/en/installation.md). ## Build a Model ### Basic Concepts In XRNeRF, model components are basically categorized as 4 types. - network: the whole nerf model pipeline, usually contains a embedder, mlp and render. - embedder: convert point-position and viewdirection data into embedded data, embedder can be function only or with trainable paramters. - mlp: use the output of embedder as input, and output raw data (the rgb and density value at sampled position) for render, usually contains FC layers. - render: receive mlp's raw data, output the rgb value at a pixel. Following some basic pipelines (e.g., `NerfNetwork`), the model structure can be customized through config files with no pains. ### Write a new network To write a new nerf network, you need to inherit from `BaseNerfNetwork`, which defines the following abstract methods. - `train_step()`: forward method of the training mode. - `val_step()`: forward method of the testing mode. [NerfNetwork](xrnerf/models/networks/nerf.py) is a good example which show how to do that. To be specific, if we want to implement some new components, there are several things to do. 1. 
create a new file in `xrnerf/models/networks/my_networks.py`. ```python from ..builder import NETWORKS from .nerf import NerfNetwork @NETWORKS.register_module() class MyNerfNetwork(NerfNetwork): def __init__(self, cfg, mlp=None, mlp_fine=None, render=None): super().__init__(cfg, mlp, mlp_fine, render) def forward(self, data): .... def train_step(self, data, optimizer, **kwargs): .... def val_step(self, data, optimizer=None, **kwargs): .... ``` 2. Import the module in `xrnerf/models/networks/__init__.py` ```python from .my_networks import MyNerfNetwork ``` 3. modify the [config file](configs/nerf/nerf_blender_base01.py) from ```python model = dict( type='NerfNetwork', .... ``` to ```python model = dict( type='MyNerfNetwork', .... ``` To implement some new components for embedder/mlp/render, procedure is similar to above. * To write a new nerf embedder, you need to inherit from `nn.Module` or `BaseEmbedder`, and define the `forward` method. [BaseEmbedder](xrnerf/models/embedders/base.py) is a good example. * To write a new nerf mlp, you need to inherit from `nn.Module` or `BaseMLP`, and define the `forward` method. [NerfMLP](xrnerf/models/mlps/nerf_mlp.py) is a good example. * To write a new nerf render, you need to inherit from `nn.Module` or `BaseRender`, and define the `forward` method. [NerfRender](xrnerf/models/renders/nerf_render.py) is a good example. ## Train a Model ### Iteration Controls XRNeRF use `mmcv.runner.IterBasedRunner` to control training, and `mmcv.runner.EpochBasedRunner` to for test mode. In training mode, the `max_iters` in config file decide how many iters. In test mode, `max_iters` is forced to change to 1, which represents only 1 epoch to test. ### Train ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego ``` Arguments are: - `--config`: config file path. - `--dataname`: select which data under dataset directory. 
### Test We have provided model ``iter_200000.pth`` for test, download from [here](https://drive.google.com/file/d/147wRy3TFlRVrZdWqAgHNak7s6jiMZA1-/view?usp=sharing) ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego --test_only --load_from iter_200000.pth ``` Arguments are: - `--config`: config file path. - `--dataname`: select which data under dataset directory. - `--test_only`: influence on whole testset once. - `--load_from`: load which checkpoint to test, this will overwrite the original `load_from` in config file to for convenience. ## Tutorials Currently, we provide some tutorials for users to * [learn about configs](docs/en/tutorials/config.md) * [customize data pipelines](docs/en/tutorials/data_pipeline.md) * [model definition](docs/en/tutorials/model.md) ## Other Documents Except for that,The document also includes the following * [api](docs/en/api.md) * [dataset](docs/en/dataset_preparation.md) * [installation](docs/en/installation.md) * [benchmark](docs/en/benchmark.md) * [FAQ](docs/en/faq.md) ## Citation If you find this project useful in your research, please consider cite: ```bibtex @misc{xrnerf, title={OpenXRLab Neural Radiance Field Toolbox and Benchmark}, author={XRNeRF Contributors}, howpublished = {\url{https://github.com/openxrlab/xrnerf}}, year={2022} } ``` ## License The license of our codebase is [Apache-2.0](LICENSE). Note that this license only applies to code in our library, the dependencies of which are separate and individually licensed. We would like to pay tribute to open-source implementations to which we rely on. Please be aware that using the content of dependencies may affect the license of our codebase. Some supported methods may carry [additional licenses](docs/en/additional_licenses.md). ## Contributing We appreciate all contributions to improve XRNeRF. Please refer to [CONTRIBUTING.md](docs/en/CONTRIBUTING.md) for the contributing guideline. 
## Acknowledgement XRNeRF is an open source project that is contributed by researchers and engineers from both the academia and the industry. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks. We wish that the framework and benchmark could serve the growing research community by providing a flexible framework to reimplement existing methods and develop their own new models. ## Projects in OpenXRLab - [XRPrimer](https://github.com/openxrlab/xrprimer): OpenXRLab foundational library for XR-related algorithms. - [XRSLAM](https://github.com/openxrlab/xrslam): OpenXRLab Visual-inertial SLAM Toolbox and Benchmark. - [XRSfM](https://github.com/openxrlab/xrsfm): OpenXRLab Structure-from-Motion Toolbox and Benchmark. - [XRLocalization](https://github.com/openxrlab/xrlocalization): OpenXRLab Visual Localization Toolbox and Server. - [XRMoCap](https://github.com/openxrlab/xrmocap): OpenXRLab Multi-view Motion Capture Toolbox and Benchmark. - [XRMoGen](https://github.com/openxrlab/xrmogen): OpenXRLab Human Motion Generation Toolbox and Benchmark. - [XRNeRF](https://github.com/openxrlab/xrnerf): OpenXRLab Neural Radiance Field (NeRF) Toolbox and Benchmark. ================================================ FILE: README_CN.md ================================================ # XRNeRF
[![actions](https://github.com/openxrlab/xrnerf/workflows/build/badge.svg)](https://github.com/openxrlab/xrnerf/actions) [![LICENSE](https://img.shields.io/github/license/openxrlab/xrnerf.svg)](https://github.com/openxrlab/xrnerf/blob/main/LICENSE)
## 简介 简体中文 | [English](README.md) 本文档提供 XRNeRF 相关用法的基本教程。对于安装说明,请参阅 [安装指南](docs/zh_cn/installation.md)。 - [XRNeRF](#xrnerf) - [简介](#简介) - [基准](#基准) - [数据集](#数据集) - [安装](#安装) - [创建模型](#创建模型) - [基本概念](#基本概念) - [自定义一个新模型](#自定义一个新模型) - [训练](#训练) - [迭代次数控制](#迭代次数控制) - [训练命令](#训练命令) - [测试](#测试) - [详细教程](#详细教程) - [引用](#引用) - [参与贡献](#参与贡献) - [致谢](#致谢) - [OpenXRLab中的其他项目](#openxrlab中的其他项目) ## 基准 更多细节可查看 [benchmark.md](docs/en/benchmark.md). 支持的场景类神经渲染方法如下:
(click to collapse) - [X] [NeRF](https://www.matthewtancik.com/nerf) (ECCV'2020) - [X] [Mip-NeRF](https://jonbarron.info/mipnerf/) (ICCV'2021) - [X] [KiloNeRF](https://arxiv.org/abs/2103.13744) (ICCV'2021) - [X] [Instant NGP](https://nvlabs.github.io/instant-ngp/) (SIGGRAPH'2022) - [X] [BungeeNeRF](https://city-super.github.io/citynerf/) (ECCV'2022) 支持的人体类神经渲染方法如下:
(click to collapse) - [X] [NeuralBody](https://zju3dv.github.io/neuralbody) (CVPR'2021) - [X] [AniNeRF](https://zju3dv.github.io/animatable_nerf/) (ICCV'2021) - [X] [GNR](https://generalizable-neural-performer.github.io/) 如果期望在XRNeRF中看到新的NeRF方法,可以张贴在[愿望清单](https://github.com/openxrlab/xrnerf/discussions/11),我们会根据社区投票意见来安排下一步的计划。 ## 数据集 我们推荐把数据集放在`项目目录/data`下面,否则可能需要修改config中的内容 ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... ``` 请参阅 [数据集准备](docs/zh_cn/dataset_preparation.md) 获取数据集准备的相关信息。 ## 安装 安装方法详见[教程](docs/zh_cn/installation.md), 我们还提供了[docker镜像文件](docker/DockerfileCN)作为另一种环境安装方式。 ## 创建模型 ### 基本概念 在XRNeRF中,模型被分为4个部分 - embedder: 输入点的位置和视角,输出embedded特征数据,embedder可能是纯函数型的,或者带有可学习参数的 - mlp: 使用embedder的输出作为输入,输出原始的点数据(采样点的rgb值和密度值)送给render, 一般由多层感知机组成 - render: 获取mlp的输出数据,沿着射线上的点进行积分等操作,输出图像上一个像素点的rgb值 - network: 将以上三个部分组织起来,同时也是与mmcv的runner进行交互的部分,控制了训练时的loss计算和验证时的指标计算 对于上述所有模型而言,输入都是一个字典类型的`data`。模型使用字典`data`中的内容来创建新的键值对,并加入`data`。以[origin nerf](configs/nerf/nerf_blender_base01.py)为例,最开始的`data`应该包含`pts`(尺寸为 n_rays, n_pts, 3) and `viewdirs`(尺寸为 n_rays, n_pts, 3). ### 自定义一个新模型 如果要自定义一个network,需要继承`BaseNerfNetwork`,其中定义了两个抽象方法 - `train_step()`: training 模式下的推理和计算loss的函数. - `val_step()`: testing 模式下的推理函数. [NerfNetwork](xrnerf/models/networks/nerf.py) 是一个很好的例子 具体而言,如果想要实现一个具有新feature的nerf方法,有以下几步需要做 1. 创建一个新文件如 `xrnerf/models/networks/my_networks.py`. ```python from ..builder import NETWORKS from .nerf import NerfNetwork @NETWORKS.register_module() class MyNerfNetwork(NerfNetwork): def __init__(self, cfg, mlp=None, mlp_fine=None, render=None): super().__init__(cfg, mlp, mlp_fine, render) def forward(self, data): .... def train_step(self, data, optimizer, **kwargs): .... def val_step(self, data, optimizer=None, **kwargs): .... ``` 2. 修改 `xrnerf/models/networks/__init__.py` 文件 ```python from .my_networks import MyNerfNetwork ``` 3. 
修改配置文件[config file](configs/nerf/nerf_blender_base01.py) 原来 ```python model = dict( type='NerfNetwork', .... ``` 现在 ```python model = dict( type='MyNerfNetwork', .... ``` 同样的,要实现embedder/mlp/render的新功能,步骤与上述类似 * 要定义一个新的embedder, 需要继承`nn.Module` 或者 `BaseEmbedder`, 并定义 `forward` 方法. [BaseEmbedder](xrnerf/models/embedders/base.py) 是个很好的例子 * 要定义一个新的mlp, 需要继承 `nn.Module` 或者 `BaseMLP`, 并定义 `forward` 方法. [NerfMLP](xrnerf/models/mlps/nerf_mlp.py) 可供参考 * 要定义一个新的render, 需要继承 `nn.Module` 或者 `BaseRender`, 并定义 `forward` 方法. [NerfRender](xrnerf/models/renders/nerf_render.py) 可供参考 ## 训练 ### 迭代次数控制 XRNeRF 使用 `mmcv.runner.IterBasedRunner` 来控制训练, 并用 `mmcv.runner.EpochBasedRunner` 来测试. 训练时, 配置文件的 `max_iters` 表示最多训练多少次. 测试时, `max_iters` 被强制改为1, 表示进行一次完整的epoch. ### 训练命令 ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego ``` 参数为: - `--config`: 配置文件位置 - `--dataname`: 使用数据集下的哪个数据来训练 ### 测试 ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego --test_only --load_from iter_200000.pth ``` 参数为: - `--config`: 配置文件位置 - `--dataname`: 使用数据集下的哪个数据 - `--test_only`: 切换为测试模式 - `--load_from`: 重载覆盖掉原来配置文件里的 `load_from`, 在某些情况下为了方便而使用 ## 详细教程 目前, XRNeRF 提供以下几种更详细的教程 * [如何编写配置文件](docs/zh_cn/tutorials/config.md) * [数据处理流程](docs/zh_cn/tutorials/data_pipeline.md) * [模型定义](docs/zh_cn/tutorials/model.md) 除此以外,文档还包括以下内容 * [api介绍](docs/zh_cn/apis.md) * [数据集准备](docs/zh_cn/dataset_preparation.md) * [安装](docs/zh_cn/installation.md) * [benchmark](docs/en/benchmark.md) * [常见问题](docs/en/faq.md) ## 引用 ```bibtex @misc{xrnerf, title={OpenXRLab Neural Radiance Field Toolbox and Benchmark}, author={XRNeRF Contributors}, howpublished = {\url{https://github.com/openxrlab/xrnerf}}, year={2022} } ``` ## 参与贡献 我们非常欢迎用户对于 XRNeRF 做出的任何贡献,可以参考 [贡献指南](docs/en/CONTRIBUTING.md) 文件了解更多细节 ## 致谢 XRNeRF 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。 ## OpenXRLab中的其他项目 - 
[XRPrimer](https://github.com/openxrlab/xrprimer): OpenXRLab foundational library for XR-related algorithms. - [XRSLAM](https://github.com/openxrlab/xrslam): OpenXRLab Visual-inertial SLAM Toolbox and Benchmark. - [XRSfM](https://github.com/openxrlab/xrsfm): OpenXRLab Structure-from-Motion Toolbox and Benchmark. - [XRLocalization](https://github.com/openxrlab/xrlocalization): OpenXRLab Visual Localization Toolbox and Server. - [XRMoCap](https://github.com/openxrlab/xrmocap): OpenXRLab Multi-view Motion Capture Toolbox and Benchmark. - [XRMoGen](https://github.com/openxrlab/xrmogen): OpenXRLab Human Motion Generation Toolbox and Benchmark. - [XRNeRF](https://github.com/openxrlab/xrnerf): OpenXRLab Neural Radiance Field (NeRF) Toolbox and Benchmark. ================================================ FILE: configs/__init__.py ================================================ import importlib def load_configs(name): modellib = importlib.import_module(name) # print(configs.hmr_configs) return modellib # load_configs("train_configs") ================================================ FILE: configs/_base_/models/nerf.py ================================================ # # model settings # model = dict( # type='nerf', # i_embed=0, # set 0 for default positional encoding, -1 for none # multires=10, # log2 of max freq for positional encoding (3D location) # multires_views=4, # log2 of max freq for positional encoding (2D direction) # use_viewdirs=True, # use full 5D input instead of 3D # N_importance=0, # number of additional fine samples per ray # netdepth=8, # layers in network # netwidth=256, # channels per layer # netdepth_fine=8, # layers in fine network # netwidth_fine=256, # channels per layer in fine network # netchunk=1024*64, # number of pts sent through network in parallel, decrease if running out of memory # ) ================================================ FILE: configs/animatable_nerf/an_h36m_s11_novel_pose.py ================================================ _base_ 
= [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s11_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s11_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 200 num_novel_pose = 82 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S11/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s11_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s11_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 200 num_novel_pose = 82 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S11/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s1_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s1_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s1_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 150 num_novel_pose = 49 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S1/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s1_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s1_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 150 num_novel_pose = 49 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S1/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s5_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s5_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s5_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 250 num_novel_pose = 127 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S5/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s5_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s5_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 250 num_novel_pose = 127 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S5/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s6_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s6_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s6_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 150 num_novel_pose = 83 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S6/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s6_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s6_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 150 num_novel_pose = 83 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S6/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s7_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s7_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s7_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 300 num_novel_pose = 200 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S7/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s7_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s7_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 300 num_novel_pose = 200 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S7/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s8_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s8_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s8_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 250 num_novel_pose = 87 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S8/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s8_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = 
dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s8_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 250 num_novel_pose = 87 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S8/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_h36m_s9_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/h36m_s1_{}/' # noqa work_dir = './work_dirs/animatable_nerf/h36m_s9_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 260 num_novel_pose = 133 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S9/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray 
dict(type='PerturbZvals', enable=is_perturb),  # jitter z-vals only when is_perturb
    dict(type='GetPts', enable=True),  # turn (rays, z-vals) into 3D sample points
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams', 'parents', 'joints'
         ]),  # drop intermediate entries no longer needed downstream
]

# Evaluation pipeline: same loading/transform steps as training, but selects
# all rays (sel_all=True), keeps 'mask_at_box', and never perturbs z-vals.
test_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load image and camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load SMPL parameters
    dict(
        type='CalculateSkelTransf',
        enable=True,
    ),  # compute skeleton transformation matrices
    dict(
        type='AninerfIdxConversion',
        enable=True,
    ),  # convert latent index
    dict(
        type='NBGetRays',
        enable=True,
    ),
    dict(type='NBSelectRays', enable=True, sel_all=True),  # select all rays
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=False),  # no jitter at test time
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams', 'parents', 'joints'
         ]),
]

# Dataset/loader assembly: val and test share valdata_cfg and test_pipeline.
data = dict(train_loader=dict(batch_size=1, num_workers=0),
            train=dict(
                type='AniNeRFDataset',
                cfg=traindata_cfg,
                pipeline=train_pipeline,
            ),
            val_loader=dict(batch_size=1, num_workers=0),
            val=dict(
                type='AniNeRFDataset',
                cfg=valdata_cfg,
                pipeline=test_pipeline,
            ),
            test_loader=dict(batch_size=1, num_workers=0),
            test=dict(
                type='AniNeRFDataset',
                cfg=valdata_cfg,
                pipeline=test_pipeline,
            ))


================================================
FILE: configs/animatable_nerf/an_h36m_s9_render_train_pose.py
================================================
# Render-only variant of the S9 train-pose config: inherits everything from
# an_h36m_s9_train_pose.py and overrides the test hooks to save a spiral
# rendering instead of running the metric TestHook.
_base_ = ['an_h36m_s9_train_pose.py']

from configs.animatable_nerf.an_h36m_s9_train_pose import *

test_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='testset')),
    dict(type='NBSaveSpiralHook', params=dict()),  # saves spiral-path renders
]

ratio = 1.
# Dataset config for render mode; mirrors the base train-pose config but uses
# the local `ratio` override (note: no 'novel_pose_frame' entry here).
basedata_cfg = dict(
    dataset_type=dataset_type,
    datadir='data/h36m/S9/Posing',
    smpl_vertices_dir='new_vertices',
    smpl_params_dir='new_params',
    ratio=ratio,  # reduce the image resolution by ratio
    unit=1000.,
    training_view=[0, 1, 2],
    test_view=[3],
    num_train_pose=num_train_pose,
    training_frame=[0, num_train_pose * frame_interval
                    ],  # [begin_frame, end_frame]
    frame_interval=frame_interval,
    val_frame_interval=val_frame_interval,
    white_bkgd=white_bkgd,
    mode='train',
    phase=phase,
    img_path_to_smpl_idx=img_path_to_smpl_idx,
    img_path_to_frame_idx=img_path_to_frame_idx,
)

# Identity mappings: frame index is used directly as SMPL / latent index.
frame_idx_to_smpl_idx = lambda x: x
frame_idx_to_latent_idx = lambda x: x

valdata_cfg = basedata_cfg.copy()
# Switch to 'render' mode: spiral of `num_render_views` virtual cameras around
# the fixed frame `frame_idx`, rendered at 1000*ratio square resolution.
valdata_cfg.update(
    dict(mode='render',
         num_render_views=50,
         frame_idx=0,
         frame_idx_to_smpl_idx=frame_idx_to_smpl_idx,
         frame_idx_to_latent_idx=frame_idx_to_latent_idx,
         render_H=int(1000 * ratio),
         render_W=int(1000 * ratio),
         ratio=ratio))

# Render pipeline: no ground-truth image is loaded (LoadCamAndSmplParam +
# sel_rgb=False), so 'target_s' is absent from the ToTensor keys.
test_pipeline = [
    dict(
        type='LoadCamAndSmplParam',
        enable=True,
    ),  # load camera and SMPL parameters
    dict(
        type='CalculateSkelTransf',
        enable=True,
    ),  # compute skeleton transformation matrices
    dict(
        type='AninerfIdxConversion',
        enable=True,
    ),  # convert latent index
    dict(
        type='NBGetRays',
        enable=True,
    ),
    dict(type='NBSelectRays', enable=True, sel_all=True,
         sel_rgb=False),  # select all rays, no RGB targets
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'near', 'far', 'mask_at_box'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=False),  # no jitter when rendering
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams', 'parents', 'joints', 'spiral_poses', 'K'
         ]),
]

# Only the test split is replaced; train/val remain as inherited.
data.update(
    dict(test=dict(
        type='AniNeRFDataset',
        cfg=valdata_cfg,
        pipeline=test_pipeline,
    ), ))


================================================
FILE: configs/animatable_nerf/an_h36m_s9_train_pose.py
================================================
_base_ = [ #
'../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/h36m_s9_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 260 num_novel_pose = 133 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 5 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/h36m/S9/Posing', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=1., # reduce the image resolution by ratio unit=1000., training_view=[0, 1, 2], test_view=[3], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 
读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/animatable_nerf/an_zjumocap_313_novel_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'novel_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 
'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir_pattern = './work_dirs/animatable_nerf/zjumocap_313_{}/' # noqa work_dir = work_dir_pattern.format(phase) timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') os.system('mkdir -p {}'.format(work_dir)) load_from = os.path.join(work_dir, 'latest.pth') if not os.path.exists(load_from): ckpt_path = os.path.join(work_dir_pattern.format('train_pose'), 'latest.pth') os.system('cp {} {}'.format(ckpt_path, work_dir)) num_train_pose = 60 num_novel_pose = 1000 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for 
default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x).split('_')[4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x).split('_')[4]) - 1 frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_313', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] novel_pose_frame=[ num_train_pose * frame_interval, (num_train_pose + num_novel_pose) * frame_interval ], frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', 
enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/animatable_nerf/an_zjumocap_313_train_pose.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'animatable_nerf' phase = 'train_pose' # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = 
dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/animatable_nerf/zjumocap_313_{}/'.format(phase) # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_pose = 60 num_novel_pose = 1000 model = dict( type='AniNeRFNetwork', cfg=dict( chunk=1024 * 4, # mainly work for val phase=phase, tpose_human=dict( type='TPoseHuman', density_mlp=dict( type='AN_DensityMLP', embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), color_mlp=dict( type='AN_ColorMLP', num_train_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 6, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), ), deform_field=dict( type='DeformField', smpl_threshold=0.05, phase=phase, bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_train_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), novel_pose_bw_mlp=dict( type='AN_BlendWeightMLP', num_pose=num_novel_pose, embedder=dict( type='BaseEmbedder', i_embed= 0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional 
encoding (2D direction) )), ), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x).split('_')[4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x).split('_')[4]) - 1 frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_313', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20], num_train_pose=num_train_pose, training_frame=[0, num_train_pose * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', phase=phase, img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 
'parents', 'joints' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='CalculateSkelTransf', enable=True, ), # 计算骨架变换矩阵 dict( type='AninerfIdxConversion', enable=True, ), # 变换latent index dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams', 'parents', 'joints' ]), ] data = dict(train_loader=dict(batch_size=1, num_workers=0), train=dict( type='AniNeRFDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='AniNeRFDataset', cfg=valdata_cfg, pipeline=test_pipeline, )) ================================================ FILE: configs/bungeenerf/bungeenerf_multiscale_google.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'bungeenerf' # [nerf, kilo_nerf, mip_nerf, bungeenerf] # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 200000 lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=500, by_epoch=False) log_level = 'INFO' log_config = dict(interval=5, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = 
[('train', 500), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='BungeeNerfTrainRunner') test_runner = dict(type='BungeeNerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 stage = 0 # current stage for training work_dir = './work_dirs/bungeenerf/#DATANAME#/stage_%d/' % stage timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'mutiscale_google' no_batching = True # only take random rays from 1 image at a time white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = False # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 2 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 65 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='BungeeNerfNetwork', cfg=dict( phase='train', # 'train' or 'test' ray_shape='cone', # The shape of cast rays ('cone' or 'cylinder'). resample_padding=0.01, # Dirichlet/alpha "padding" on the histogram. 
N_importance=65, # number of additional fine samples per ray is_perturb=is_perturb, chunk=1024 * 32, # mainly work for val bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse model type='BungeeNerfMLP', cur_stage=stage, # resblock nums netwidth=256, # channels per layer netchunk=1024 * 64, # number of pts sent through network in parallel; embedder=dict( type='BungeeEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), render=dict( # render model type='BungeeNerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir='data/multiscale_google/#DATANAME#', white_bkgd=white_bkgd, factor=3, N_rand_per_sampler=N_rand_per_sampler, mode='train', cur_stage=stage, holdout=16, is_batching=True, # True for blender, False for llff ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict( type='BungeeBatchSample', enable=True, N_rand=N_rand_per_sampler, ), dict(type='DeleteUseless', keys=['rays_rgb', 'idx']), dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'scale_code'], ), dict( type='GetViewdirs', enable=use_viewdirs, ), dict(type='BungeeGetBounds', enable=True), dict(type='BungeeGetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='DeleteUseless', enable=True, keys=['pose', 
'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='GetRays', include_radius=True, enable=True, ), dict(type='FlattenRays', include_radius=True, enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict(type='BungeeGetBounds', enable=True), dict(type='BungeeGetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='BungeeDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='BungeeDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='BungeeDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/gnr/gnr_genebody.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'gnr' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=1, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', 
params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/gnr/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = False # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 256 # number of coarse samples per ray use_feat_sr = False # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='GnrNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, projection_mode='perspective', is_perturb=is_perturb, use_feat_sr=False, use_smpl_sdf=True, use_t_pose=True, use_smpl_depth=True, use_attention=True, ddp=False, chunk=524288, # mainly work for val num_views=4, image_filter=dict(type='HGFilter', opt=dict(norm='group', num_stack=4, num_hourglass=2, skip_hourglass=True, hg_down='ave_pool', hourglass_dim=256)), sr_filter=dict(type='SRFilters', order=2, 
out_ch=256), nerf=dict(type='GNRMLP', opt=dict( input_ch_feat=64 if use_feat_sr else 256, smpl_type='smplx', use_smpl_sdf=True, use_t_pose=True, use_nml=True, use_attention=True, weighted_pool=True, use_sh=True, use_viewdirs=True, use_occlusion=True, use_smpl_depth=True, use_occlusion_net=True, angle_diff=False, use_bn=False, skips=[2, 4, 6], num_views=4, )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays nerf_renderer=dict( # render model type='GnrRenderer', opt=dict(model=None, N_samples=256, ddp=False, train_encoder=False, projection_mode='perspective', loadSize=512, num_views=4, N_rand=1024, N_grid=512, use_nml=True, use_attention=True, debug=False, use_vgg=False, use_smpl_sdf=True, use_t_pose=True, use_smpl_depth=True, regularization=False, angle_diff=False, use_occlusion=True, use_occlusion_net=True, use_vh_free=False, use_white_bkgd=False, chunk=524288, N_rand_infer=4096, use_vh=True, laplacian=5, vh_overhead=1), ), train_encoder=False)) basedata_cfg = dict(dataset_type=dataset_type, dataroot='path/to/GeneBodyDataset', eval_skip=1, train_skip=1, loadSize=512, num_views=4, use_smpl_sdf=True, use_t_pose=True, smpl_type='smplx', t_pose_path='path/to/smpl_t_pose', use_smpl_depth=True, use_white_bkgd=False, random_multiview=False) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) data = dict( train_loader=dict(batch_size=1, num_workers=6), train=dict(type='GeneBodyDataset', opt=traindata_cfg, phase='train', pipeline=[]), val_loader=dict(batch_size=1, num_workers=6), val=dict(type='GeneBodyDataset', opt=valdata_cfg, phase='val', pipeline=[]), test_loader=dict(batch_size=1, num_workers=6), test=dict(type='GeneBodyDataset', opt=valdata_cfg, phase='test', pipeline=[]), ) ================================================ FILE: configs/instant_ngp/nerf_blender_local01.py ================================================ _base_ = [ # 
'../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime # [nerf, kilo_nerf, mip_nerf] method = 'nerf' # optimizer optimizer = dict(type='Adam', lr=1e-2, betas=(0.9, 0.99), eps=1e-15, weight_decay=1e-6) optimizer_config = dict(grad_clip=None) max_iters = 50000 lr_config = dict(policy='step', step=10000, gamma=0.2, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) custom_hooks = [dict(type='EMAHook', momentum=0.05)] log_level = 'INFO' log_config = dict(interval=500, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 500), ('val', 1)] # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='OccupationHook', params=dict()), dict(type='PassIterHook', params=dict()), dict(type='PassDatasetHook', params=dict(), variables=dict(dataset='trainset')), dict(type='ModifyBatchsizeHook', params=dict()), dict(type='PassSamplerIterHook', params=dict()), ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='PassDatasetHook', params=dict(), variables=dict(dataset='testset')), dict(type='HashSaveSpiralHook', params=dict(save_folder='visualizations/spirals', ), variables=dict(cfg='cfg')), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) work_dir = './work_dirs/instant_ngp/nerf_#DATANAME#_base01/' timestamp = datetime.now().strftime('%d-%b-%H-%M') dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd 
(always use for dvoxels) load_alpha = True use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 4096 # how many N_rand in get_item() function # lindisp = False # sampling linearly in disparity rather than depth # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='HashNerfNetwork', cfg=dict( phase='train', # 'train' or 'test' chunk=4096, # mainly work for val bs_data='rays_o', ), mlp=dict( # coarse model type='HashNerfMLP', bound=1, embedder_pos=dict(n_input_dims=3, encoding_config=dict( otype='HashGrid', n_levels=16, n_features_per_level=2, log2_hashmap_size=19, base_resolution=16, interpolation='Linear', )), embedder_dir=dict(n_input_dims=3, encoding_config=dict( otype='SphericalHarmonics', degree=4, )), density_net=dict(n_input_dims=32, n_output_dims=16, network_config=dict( otype='FullyFusedMLP', activation='ReLU', output_activation='None', n_neurons=64, num_layers=1, )), color_net=dict( # n_input_dims=32, # embedder_dir's out + density_net's out n_output_dims=3, network_config=dict( otype='FullyFusedMLP', activation='ReLU', output_activation='None', n_neurons=64, num_layers=2, )), ), sampler=dict( type='NGPGridSampler', update_grid_freq=16, update_block_size=5000000, n_rays_per_batch=N_rand_per_sampler, cone_angle_constant=0.00390625, near_distance=0.2, target_batch_size=1 << 18, rgb_activation=2, density_activation=3, ), render=dict( type='HashNerfRender', bg_color=[0, 0, 0], ), ) basedata_cfg = dict( dataset_type=dataset_type, N_rand_per_sampler=N_rand_per_sampler, datadir='data/nerf_synthetic/#DATANAME#', half_res=False, # load blender synthetic data at 400x400 or 800x800 testskip=1, white_bkgd=white_bkgd, load_alpha=load_alpha, is_batching=True, # True for hashnerf mode='train', val_n=10, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val', )) 
testdata_cfg.update(dict(mode='test', testskip=100)) train_pipeline = [ dict(type='HashBatchSample', N_rand=N_rand_per_sampler), dict(type='RandomBGColor'), dict(type='DeleteUseless', keys=['rays_rgb', 'iter_n', 'idx']), ] test_pipeline = [ dict( type='HashGetRays', enable=True, ), dict(type='FlattenRays', enable=True), dict( type='HashSetImgids', enable=True, ), # dict( # type='RandomBGColor', # enable=True, # ), dict(type='DeleteUseless', enable=True, keys=['pose', 'idx']), ] data = dict( # num_workers>0 lead to low psnr ? train_loader=dict(batch_size=1, num_workers=0), train=dict( type='HashNerfDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='HashNerfDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='HashNerfDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/kilonerf/kilonerf_distill_BlendedMVS_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'multi_network' #[single_network, multi_network] phase = 'distill' # [pretrain, distill, finetune] resolution_table = dict( Character=[8, 16, 8], Fountain=[14, 16, 14], Jade=[16, 14, 16], Statues=[12, 14, 16], ) # optimizer optimizer = dict(type='Adam', lr=0.001) optimizer_config = dict(grad_clip=None) max_iters = 150000 # max_iters = 50000 # Character only needs 50000 iterations, other scenes need 150000 iterations lr_config = None checkpoint_config = None log_level = 'INFO' log_config = dict(interval=500, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 500), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SaveDistillResultsHook', params=dict(), variables=dict(cfg='cfg', trainset='trainset')), dict(type='DistllCycleHook', params=dict(), variables=dict(cfg='cfg')), dict(type='OccupationHook', params=dict()), # no need for open-source vision ] # runner train_runner = dict(type='KiloNerfDistillTrainRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/distill' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'nsvf' datadir = 'data/nsvf/BlendedMVS/#DATANAME#' max_num_networks = 512 num_networks = max_num_networks outputs = 'color_and_density' alpha_distance = 0.0211 convert_density_to_alpha = True quantile_se = 0.99 skip_final = True tree_type = 'kdtree_longest' test_error_metric = 'quantile_se' equal_split_metric = 'mse' max_error = 100000 train_batch_size = 128 # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='StudentNerfNetwork', cfg=dict( outputs=outputs, test_batch_size=512, query_batch_size=80000, ), pretrained_kwargs=dict( config='./configs/kilonerfs/kilonerf_pretrain_BlendedMVS_base01.py', checkpoint= './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/pretrain/latest.pth' ), multi_network=dict( # multi network type='KiloNerfMultiNetwork', num_networks=max_num_networks, alpha_rgb_initalization= 'pass_actual_nonlinearity', # in multi network model init bias_initialization_method='standard', # in multi network model init direction_layer_size=32, # in multi network model init hidden_layer_size=32, # in multi network model init late_feed_direction=True, # in multi network model init network_rng_seed=8078673, # in multi network model init nonlinearity_initalization= 'pass_actual_nonlinearity', # in multi network model init num_hidden_layers=2, # in multi network model 
init num_output_channels=4, refeed_position_index=None, # in multi network model init use_same_initialization_for_all_networks= True, # in multi network model init weight_initialization_method= 'kaiming_uniform', # in multi network model init embedder=dict( type='KiloNerfFourierEmbedder', num_networks=max_num_networks, # num of networks, will be changed input_ch=3, multires= 10, # num_frequencies, log2 of max freq for positional encoding (3D location) multires_dirs= 4, # num_frequencies_direction, this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), render=dict( # render model type='KiloNerfSimpleRender', alpha_distance=alpha_distance, convert_density_to_alpha=convert_density_to_alpha, ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, mode='train', batch_index=0, work_dir=work_dir, num_examples_per_network=1000000, max_num_networks=max_num_networks, train_batch_size=train_batch_size, outputs=outputs, is_batching=False, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val', num_examples_per_network=20000)) train_pipeline = [ dict( type='ExampleSample', enable=True, train_batch_size=train_batch_size, ), dict( type='ToTensor', enable=True, keys=['domain_mins', 'domain_maxs'], ), dict(type='DeleteUseless', enable=True, keys=[ 'all_examples' ]), # delete batch_examples after getting batch_inputs and batch_targets ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['domain_mins', 'domain_maxs'], ), ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='KiloNerfNodeDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='KiloNerfNodeDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/kilonerf/kilonerf_distill_Synthetic_NeRF_base01.py 
================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'multi_network' #[single_network, multi_network] phase = 'distill' # [pretrain, distill, finetune] resolution_table = dict(Chair=[13, 13, 16], Drums=[16, 13, 12], Ficus=[8, 11, 16], Hotdog=[16, 16, 6], Lego=[9, 16, 10], Materials=[16, 14, 5], Mic=[16, 16, 15], Ship=[16, 16, 9]) # optimizer optimizer = dict(type='Adam', lr=0.001) optimizer_config = dict(grad_clip=None) # max_iters = 150000 max_iters = 50000 # Hotdog only needs 50000 iterations, other scenes need 150000 iterations lr_config = None checkpoint_config = None log_level = 'INFO' log_config = dict(interval=500, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 500), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SaveDistillResultsHook', params=dict(), variables=dict(cfg='cfg', trainset='trainset')), dict(type='DistllCycleHook', params=dict(), variables=dict(cfg='cfg')), dict(type='OccupationHook', params=dict()), # no need for open-source vision ] # runner train_runner = dict(type='KiloNerfDistillTrainRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/distill' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... 
dataset_type = 'nsvf' datadir = 'data/nsvf/Synthetic_NeRF/#DATANAME#' max_num_networks = 512 num_networks = max_num_networks outputs = 'color_and_density' alpha_distance = 0.0211 convert_density_to_alpha = True quantile_se = 0.99 skip_final = True tree_type = 'kdtree_longest' test_error_metric = 'quantile_se' equal_split_metric = 'mse' max_error = 100000 train_batch_size = 128 # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='StudentNerfNetwork', cfg=dict( outputs=outputs, test_batch_size=512, query_batch_size=80000, ), pretrained_kwargs=dict( config='./configs/kilonerfs/kilonerf_pretrain_Synthetic_NeRF_base01.py', checkpoint= './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/pretrain/latest.pth' ), multi_network=dict( # multi network type='KiloNerfMultiNetwork', num_networks=max_num_networks, alpha_rgb_initalization= 'pass_actual_nonlinearity', # in multi network model init bias_initialization_method='standard', # in multi network model init direction_layer_size=32, # in multi network model init hidden_layer_size=32, # in multi network model init late_feed_direction=True, # in multi network model init network_rng_seed=8078673, # in multi network model init nonlinearity_initalization= 'pass_actual_nonlinearity', # in multi network model init num_hidden_layers=2, # in multi network model init num_output_channels=4, refeed_position_index=None, # in multi network model init use_same_initialization_for_all_networks= True, # in multi network model init weight_initialization_method= 'kaiming_uniform', # in multi network model init embedder=dict( type='KiloNerfFourierEmbedder', num_networks=max_num_networks, # num of networks, will be changed input_ch=3, multires= 10, # num_frequencies, log2 of max freq for positional encoding (3D location) multires_dirs= 4, # num_frequencies_direction, this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), 
render=dict( # render model type='KiloNerfSimpleRender', alpha_distance=alpha_distance, convert_density_to_alpha=convert_density_to_alpha, ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, mode='train', batch_index=0, work_dir=work_dir, num_examples_per_network=1000000, max_num_networks=max_num_networks, train_batch_size=train_batch_size, outputs=outputs, is_batching=False, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val', num_examples_per_network=20000)) train_pipeline = [ dict( type='ExampleSample', enable=True, train_batch_size=train_batch_size, ), dict( type='ToTensor', enable=True, keys=['domain_mins', 'domain_maxs'], ), dict(type='DeleteUseless', enable=True, keys=[ 'all_examples' ]), # delete batch_examples after getting batch_inputs and batch_targets ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['domain_mins', 'domain_maxs'], ), ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='KiloNerfNodeDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='KiloNerfNodeDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/kilonerf/kilonerf_finetune_BlendedMVS_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'multi_network' #[single_network, multi_network] phase = 'finetune' # [pretrain, distill, finetune] resolution_table = dict( Character=[128, 256, 128], Fountain=[224, 256, 224], Jade=[256, 224, 256], Statues=[192, 224, 256], ) # optimizer optimizer = dict(type='Adam', lr=0.001, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 1000000 
lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=50000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 50000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='CalElapsedTimeHook', params=dict()), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='KiloNerfTrainRunner') test_runner = dict(type='KiloNerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/finetune' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'nsvf' datadir = 'data/nsvf/BlendedMVS/#DATANAME#' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (Fountain and Jade have black background, set white_bkgd=False) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 8192 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 384 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') occupancy_checkpoint = './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/pretrain_occupancy/occupancy.pth' distilled_config = './configs/kilonerfs/kilonerf_distill_BlendedMVS_base01.py' distilled_checkpoint = './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/distill/checkpoint.pth' model = dict( type='KiloNerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=0, # number of additional fine samples per ray is_perturb=is_perturb, chunk=40000, # chunk_size, mainly work for val l2_regularization_lambda=1.0e-06, bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # multi_network model type='KiloNerfMLP', distilled_config=distilled_config, distilled_checkpoint=distilled_checkpoint, occupancy_checkpoint=occupancy_checkpoint, embedder=dict( type='KiloNerfFourierEmbedder', num_networks=1, # num_networks, teacher nerf network only have 1 input_ch=3, multires= 10, # num_frequencies, log2 of max freq for positional encoding (3D location) multires_dirs= 4, # num_frequencies_direction, this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=None, render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, half_res=False, # load nsvf synthetic data at 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels 
white_bkgd=white_bkgd, is_batching=False, render_test=True, mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=1)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict( type='GetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, precrop_iters=0, precrop_frac=0.5), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='KilonerfGetRays', enable=True, expand_origin=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='KiloNerfDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='KiloNerfDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='KiloNerfDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/kilonerf/kilonerf_finetune_Synthetic_NeRF_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'multi_network' #[single_network, multi_network] phase = 'finetune' # [pretrain, distill, finetune] resolution_table = dict(Chair=[208, 208, 256], Drums=[256, 208, 192], Ficus=[128, 176, 256], Hotdog=[256, 256, 96], Lego=[144, 256, 160], Materials=[256, 224, 80], Mic=[256, 256, 240], Ship=[256, 256, 144]) # optimizer optimizer = dict(type='Adam', lr=0.001, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 1000000 lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=50000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', max_iters), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are 
variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='CalElapsedTimeHook', params=dict()), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='KiloNerfTrainRunner') test_runner = dict(type='KiloNerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/finetune' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'nsvf' datadir = 'data/nsvf/Synthetic_NeRF/#DATANAME#' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = True # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 8192 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 384 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') occupancy_checkpoint = './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/pretrain_occupancy/occupancy.pth' distilled_config = './configs/kilonerfs/kilonerf_distill_Synthetic_NeRF_base01.py' distilled_checkpoint = './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/distill/checkpoint.pth' model = dict( type='KiloNerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=0, # number of additional fine samples per ray is_perturb=is_perturb, chunk=40000, # chunk_size, mainly work for val l2_regularization_lambda=1.0e-06, bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # multi_network model type='KiloNerfMLP', distilled_config=distilled_config, distilled_checkpoint=distilled_checkpoint, occupancy_checkpoint=occupancy_checkpoint, embedder=dict( type='KiloNerfFourierEmbedder', num_networks=1, # num_networks, teacher nerf network only have 1 input_ch=3, multires= 10, # num_frequencies, log2 of max freq for positional encoding (3D location) multires_dirs= 4, # num_frequencies_direction, this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=None, render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, half_res=False, # load nsvf synthetic data at 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like 
deepvoxels white_bkgd=white_bkgd, is_batching=False, render_test=True, mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=1)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict( type='GetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, precrop_iters=0, precrop_frac=0.5), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='KilonerfGetRays', enable=True, expand_origin=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='KiloNerfDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='KiloNerfDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='KiloNerfDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/kilonerf/kilonerf_pretrain_BlendedMVS_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'single_network' #[single_network, multi_network] phase = 'pretrain' # [pretrain, distill, finetune] resolution_table = dict( Character=[128, 256, 128], Fountain=[224, 256, 224], Jade=[256, 224, 256], Statues=[192, 224, 256], ) # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 600000 # max_iters = 100000 # Character only needs 100000 iterations, other scenes need 600000 iterations lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=50000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 50000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' 
are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision dict(type='CalElapsedTimeHook', params=dict()), dict(type='BuildOccupancyTreeHook', params=dict(), variables=dict(cfg='cfg')) ] # runner train_runner = dict(type='KiloNerfTrainRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/pretrain' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'nsvf' datadir = 'data/nsvf/BlendedMVS/#DATANAME#' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = True # set to render synthetic data on a white bkgd (Fountain and Jade have black background, set white_bkgd=False) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 384 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') build_occupancy_tree_config = dict( subsample_resolution=[3, 3, 3], threshold=10, voxel_batch_size=16384, work_dir= './work_dirs/kilonerfs/BlendedMVS_#DATANAME#_base01/pretrain_occupancy') model = dict( type='NerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=0, # number of additional fine samples per ray is_perturb=is_perturb, chunk=16384, # chunk_size, mainly work for val bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 64, # number of pts sent through network in parallel; output_ch=4, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=None, render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, half_res=False, # load nsvf synthetic data at 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels white_bkgd=white_bkgd, is_batching=False, render_test=True, mode='train', ) traindata_cfg = basedata_cfg.copy() 
valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict( type='GetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, precrop_iters=10000, precrop_frac=0.5), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='GetRays', enable=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/kilonerf/kilonerf_pretrain_Synthetic_NeRF_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'kilo_nerf' # [nerf, kilo_nerf, mip_nerf] model_type = 'single_network' #[single_network, multi_network] phase = 'pretrain' # [pretrain, distill, finetune] resolution_table = dict(Chair=[208, 208, 256], Drums=[256, 208, 192], Ficus=[128, 176, 256], Hotdog=[256, 256, 96], Lego=[144, 256, 160], Materials=[256, 224, 80], Mic=[256, 256, 240], Ship=[256, 256, 144]) # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) # max_iters = 600000 max_iters = 100000 # Hotdog only needs 100000 iterations, other scenes need 600000 iterations lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=50000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 50000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ 
dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision dict(type='CalElapsedTimeHook', params=dict()), dict(type='BuildOccupancyTreeHook', params=dict(), variables=dict(cfg='cfg')) ] # runner train_runner = dict(type='KiloNerfTrainRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/pretrain' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'nsvf' datadir = 'data/nsvf/Synthetic_NeRF/#DATANAME#' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = True # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 384 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') build_occupancy_tree_config = dict( subsample_resolution=[3, 3, 3], threshold=10, voxel_batch_size=16384, work_dir= './work_dirs/kilonerfs/Synthetic_NeRF_#DATANAME#_base01/pretrain_occupancy' ) model = dict( type='NerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=0, # number of additional fine samples per ray is_perturb=is_perturb, chunk=16384, # chunk_size, mainly work for val bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 64, # number of pts sent through network in parallel; output_ch=4, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=None, render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=datadir, half_res=False, # load nsvf synthetic data at 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels white_bkgd=white_bkgd, is_batching=False, render_test=True, mode='train', ) traindata_cfg = basedata_cfg.copy() 
valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict( type='GetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, precrop_iters=10000, precrop_frac=0.5), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='GetRays', enable=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/mipnerf/mipnerf_blender.py ================================================ import os from datetime import datetime method = 'mip_nerf' # [nerf, kilo_nerf, mip_nerf] use_multiscale = False # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) max_iters = 1000000 lr_config = dict( policy='Mip', lr_init=5e-4, lr_final=5e-6, max_steps=max_iters, lr_delay_steps=2500, lr_delay_mult=0.01, by_epoch=False, ) checkpoint_config = dict(interval=100000, by_epoch=False) optimizer_config = dict(grad_clip=None) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 100000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='val_results/')), dict(type='SaveSpiralHook', params=dict(save_folder='spiral_results/')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), 
dict(type='TestHook', params=dict(ndown=1, dump_json=True, save_img=True, save_folder='test_results/'), variables=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = f'/mnt/lustre/ganshikang/Projects/xrnerf/single_results/#DATANAME#' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc # set to render synthetic data on a white bkgd (always use for dvoxels) white_bkgd = True use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth num_samples = 128 # number of samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='MipNerfNetwork', cfg=dict( num_levels=2, # The number of sampling levels. # If True, sample linearly in disparity, not in depth. ray_shape='cone', # The shape of cast rays ('cone' or 'cylinder'). resample_padding=0.01, # Dirichlet/alpha "padding" on the histogram. use_multiscale=use_multiscale, # If True, use multiscale. coarse_loss_mult=0.1, # How much to downweight the coarse loss(es). chunk=800, # mainly work for val bs_data='rays_o'), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # number of pts sent through network in parallel; use_viewdirs=use_viewdirs, embedder=dict( type='MipNerfEmbedder', # Min degree of positional encoding for 3D points. min_deg_point=0, # Max degree of positional encoding for 3D points. 
max_deg_point=16, min_deg_view=0, # Min degree of positional encoding for viewdirs. max_deg_view=4, # Max degree of positional encoding for viewdirs. use_viewdirs=use_viewdirs, append_identity=True), ), render=dict( # render model type='MipNerfRender', # set to render synthetic data on a white bkgd (always use for dvoxels) white_bkgd=white_bkgd, raw_noise_std=0, # Standard deviation of noise added to raw density. density_bias=-1., # The shift added to raw densities pre-activation. rgb_padding=0.001, # Padding added to the RGB outputs. density_activation='softplus', # density activation ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=f'data/multiscale/#DATANAME#', half_res=False, # load blender synthetic data at 400x400 instead of 800x800 testskip=16, white_bkgd=white_bkgd, is_batching=False, mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict(type='GetRays', enable=True, include_radius=True), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, include_radius=True), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True, near_new=2., far_new=6.), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=num_samples + 1, randomized=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict(type='GetRays', enable=True, include_radius=True), dict(type='FlattenRays', enable=True, include_radius=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True, near_new=2., far_new=6.), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=num_samples + 1, randomized=False), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='SceneBaseDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/mipnerf/mipnerf_multiscale.py ================================================ import os from datetime import datetime method = 'mip_nerf' # [nerf, kilo_nerf, mip_nerf] use_multiscale = True # optimizer optimizer = dict(type='Adam', lr=5e-4) optimizer_config = dict(grad_clip=None) max_iters = 1000000 lr_config = dict( policy='Mip', lr_init=5e-4, lr_final=5e-6, max_steps=max_iters, lr_delay_steps=2500, lr_delay_mult=0.01, by_epoch=False, ) checkpoint_config = dict(interval=100000, by_epoch=False) optimizer_config = dict(grad_clip=None) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='val_results/')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset # no need for open-source vision dict(type='OccupationHook', params=dict()), ] test_hooks = [ dict(type='TestHook', params=dict(ndown=4, dump_json=True, 
save_img=True, save_folder='test_results/'), variables=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/mip_nerf/#DATANAME#/' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'multiscale' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc # set to render synthetic data on a white bkgd (always use for dvoxels) white_bkgd = True use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth num_samples = 128 # number of samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='MipNerfNetwork', cfg=dict( num_levels=2, # The number of sampling levels. # If True, sample linearly in disparity, not in depth. ray_shape='cone', # The shape of cast rays ('cone' or 'cylinder'). resample_padding=0.01, # Dirichlet/alpha "padding" on the histogram. use_multiscale=use_multiscale, # If True, use multiscale. coarse_loss_mult=0.1, # How much to downweight the coarse loss(es). chunk=800, # mainly work for val bs_data='rays_o' # randomized=True, # Use randomized stratified sampling. ), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # number of pts sent through network in parallel; use_viewdirs=use_viewdirs, embedder=dict( type='MipNerfEmbedder', # Min degree of positional encoding for 3D points. min_deg_point=0, # Max degree of positional encoding for 3D points. max_deg_point=16, min_deg_view=0, # Min degree of positional encoding for viewdirs. 
max_deg_view=4, # Max degree of positional encoding for viewdirs. use_viewdirs=use_viewdirs, append_identity=True), ), render=dict( # render model type='MipNerfRender', # set to render synthetic data on a white bkgd (always use for dvoxels) white_bkgd=white_bkgd, raw_noise_std=0, # Standard deviation of noise added to raw density. density_bias=-1., # The shift added to raw densities pre-activation. rgb_padding=0.001, # Padding added to the RGB outputs. density_activation='softplus', # density activation ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir=f'data/multiscale/#DATANAME#', white_bkgd=white_bkgd, mode='train', N_rand_per_sampler=N_rand_per_sampler, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test')) ray_keys = ['rays_o', 'rays_d', 'viewdirs', 'radii', 'lossmult', 'near', 'far'] train_pipeline = [ dict(type='MipMultiScaleSample', keys=['target_s'] + ray_keys, N_rand=N_rand_per_sampler), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=num_samples + 1, randomized=True), dict(type='ToTensor', keys=['target_s'] + ray_keys), ] test_pipeline = [ dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=num_samples + 1, randomized=False), dict(type='ToTensor', keys=['image'] + ray_keys), ] data = dict( train_loader=dict(batch_size=1, num_workers=1), train=dict( type='MipMultiScaleDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='MipMultiScaleDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='MipMultiScaleDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/nerf/nerf_blender_base01.py ================================================ _base_ = [ # 
'../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'nerf' # [nerf, kilo_nerf, mip_nerf] # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 200000 lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=5, by_epoch=False) log_level = 'INFO' log_config = dict(interval=5, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 5), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/nerf/nerf_#DATANAME#_base01/' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = True # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 4 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='NerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=128, # number of additional fine samples per ray is_perturb=is_perturb, chunk=1024 * 32, # mainly work for val bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # number of pts sent through network in parallel; output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=dict( # fine model type='NerfMLP', skips=[4], netdepth=8, # layers in fine network netwidth=256, # channels per layer in fine network netchunk=1024 * 32, output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, # same as above embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) 
basedata_cfg = dict( dataset_type=dataset_type, datadir='data/nerf_synthetic/#DATANAME#', half_res=True, # load blender synthetic data at 400x400 instead of 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels white_bkgd=white_bkgd, is_batching=False, # True for blender, False for llff mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict( type='ToTensor', enable=True, keys=['pose', 'target_s'], ), dict( type='GetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='SelectRays', enable=True, sel_n=N_rand_per_sampler, precrop_iters=500, precrop_frac=0.5), # 抽取N个射线 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose', 'iter_n']), # 删除pose 其实求完ray就不再需要了 ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='GetRays', enable=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 
记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='SceneBaseDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/nerf/nerf_llff_base01.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'nerf' # [nerf, kilo_nerf, mip_nerf] # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 20 lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=5, by_epoch=False) log_level = 'INFO' log_config = dict(interval=5, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 5), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset 
dict(type='OccupationHook', params=dict()), # no need for open-source vision # dict(type='SaveDistillResultsHook', params=dict(), variables=dict(model='network', cfg='cfg', trainset='trainset')), # kilo示例 ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/nerf/nerf_#DATANAME#_base01/' timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'llff' no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') model = dict( type='NerfNetwork', cfg=dict( phase='train', # 'train' or 'test' N_importance=128, # number of additional fine samples per ray is_perturb=is_perturb, chunk=1024 * 32, # mainly work for val bs_data='rays_o', # the data's shape indicates the real batch-size ), mlp=dict( # coarse model type='NerfMLP', skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # number of pts sent through network in parallel; output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # 
this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=dict( # fine model type='NerfMLP', skips=[4], netdepth=8, # layers in fine network netwidth=256, # channels per layer in fine network netchunk=1024 * 32, output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, # same as above embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), render=dict( # render model type='NerfRender', white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) raw_noise_std= 1e0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) basedata_cfg = dict( dataset_type=dataset_type, datadir='data/nerf_llff_data/#DATANAME#', half_res=False, # load blender synthetic data at 400x400 instead of 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels N_rand_per_sampler=N_rand_per_sampler, llffhold=8, # will take every 1/N images as LLFF test set, paper uses 8 no_ndc=no_ndc, white_bkgd=white_bkgd, spherify=False, # set for spherical 360 scenes shape='greek', # options : armchair / cube / greek / vase factor=8, # downsample factor for LLFF images is_batching=True, # True for blender, False for llff mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict( type='BatchSample', enable=True, N_rand=N_rand_per_sampler, ), dict(type='DeleteUseless', keys=['rays_rgb', 'idx']), dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s'], ), dict( type='GetViewdirs', 
enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['iter_n']), # iter_n ] test_pipeline = [ dict( type='ToTensor', enable=True, keys=['pose'], ), dict( type='GetRays', enable=True, ), dict(type='FlattenRays', enable=True), # 原来是(H, W, ..) 变成(H*W, ...) 记录下原来的尺寸 dict( type='GetViewdirs', enable=use_viewdirs, ), dict( type='ToNDC', enable=(not no_ndc), ), dict(type='GetBounds', enable=True), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 同上train_pipeline dict(type='PerturbZvals', enable=False), # 测试集不扰动 dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=['pose']), # 删除pose 其实求完ray就不再需要了 ] data = dict( train_loader=dict(batch_size=4, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='SceneBaseDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_313.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = 
dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_313/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_frame = 60 model = dict( type='NeuralBodyNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, is_perturb=is_perturb, chunk=1024 * 4, # mainly work for val smpl_embedder=dict( type='SmplEmbedder', voxel_size=[0.005, 0.005, 0.005], ), num_train_frame=num_train_frame, nerf_mlp=dict( type='NB_NeRFMLP', num_frame=num_train_frame, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x).split('_')[4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x).split('_')[4]) - 1 frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_313', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, 
val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), 
test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_315.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_315/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... 
# ---- shared params by model and data (NeuralBody, ZJU-MoCap CoreView_315) ----
dataset_type = 'blender'
no_batching = True  # only take random rays from 1 image at a time
no_ndc = True  # the original NeRF code sets no_ndc when "if args.dataset_type != 'llff' or args.no_ndc:"
white_bkgd = False  # set to render synthetic data on a white bkgd (always use for dvoxels)
is_perturb = True  # set to 0. for no jitter, 1. for jitter
use_viewdirs = True  # use full 5D input instead of 3D
N_rand_per_sampler = 1024 * 1  # how many N_rand in get_item() function
lindisp = False  # sampling linearly in disparity rather than depth
N_samples = 64  # number of coarse samples per ray

# resume_from = os.path.join(work_dir, 'latest.pth')
load_from = os.path.join(work_dir, 'latest.pth')

num_train_frame = 400  # number of frames used for training

model = dict(
    type='NeuralBodyNetwork',
    cfg=dict(
        raw_noise_std=0,  # std dev of noise added to regularize sigma_a output, 1e0 recommended
        white_bkgd=white_bkgd,  # set to render synthetic data on a white bkgd (always use for dvoxels)
        use_viewdirs=use_viewdirs,
        is_perturb=is_perturb,
        chunk=1024 * 4,  # mainly work for val
        smpl_embedder=dict(
            type='SmplEmbedder',
            voxel_size=[0.005, 0.005, 0.005],
        ),
        num_train_frame=num_train_frame,
        nerf_mlp=dict(
            type='NB_NeRFMLP',
            num_frame=num_train_frame,
            embedder=dict(
                type='BaseEmbedder',
                i_embed=0,  # set 0 for default positional encoding, -1 for none
                multires=10,  # log2 of max freq for positional encoding (3D location)
                multires_dirs=4,  # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction)
            )),
        bs_data='rays_o',  # the data's shape indicates the real batch-size, this's also the num of rays
    ),
    render=dict(  # render model
        type='NerfRender',
    ),
)

# Map an image path to its SMPL index / zero-based frame index.
# Parses the 5th '_'-separated field of the file name — presumably the frame
# number embedded in CoreView_315 image names; TODO confirm against the data.
img_path_to_smpl_idx = lambda x: int(os.path.basename(x).split('_')[4])
img_path_to_frame_idx = lambda x: int(os.path.basename(x).split('_')[4]) - 1

frame_interval = 1
val_frame_interval = 30

basedata_cfg = dict(
    dataset_type=dataset_type,
    datadir='data/zju_mocap/CoreView_315',
    smpl_vertices_dir='new_vertices',
    smpl_params_dir='new_params',
    ratio=0.5,  # reduce the image resolution by ratio
    unit=1000.,
    training_view=[0, 6, 12, 18],
    test_view=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20],
    num_train_frame=num_train_frame,
    training_frame=[0, num_train_frame * frame_interval
                    ],  # [begin_frame, end_frame]
    frame_interval=frame_interval,
    val_frame_interval=val_frame_interval,
    white_bkgd=white_bkgd,
    mode='train',
    img_path_to_smpl_idx=img_path_to_smpl_idx,
    img_path_to_frame_idx=img_path_to_frame_idx,
)
traindata_cfg = basedata_cfg.copy()
valdata_cfg = basedata_cfg.copy()
traindata_cfg.update(dict())
valdata_cfg.update(dict(mode='val'))

train_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),  # unlike a batching-style dataset, rays must be generated from the pose
    dict(type='NBSelectRays', enable=True,
         sel_n=N_rand_per_sampler),  # sample N rays
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=is_perturb),
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

test_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),
    dict(type='NBSelectRays', enable=True,
         sel_all=True),  # select rays (here: all of them)
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=False),  # no jitter at test time
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

data = dict(
    train_loader=dict(batch_size=1, num_workers=0),
    train=dict(
        type='NeuralBodyDataset',
        cfg=traindata_cfg,
        pipeline=train_pipeline,
    ),
    val_loader=dict(batch_size=1, num_workers=0),
    val=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,
        pipeline=test_pipeline,
    ),
    test_loader=dict(batch_size=1, num_workers=0),
    test=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,  # NOTE: test reuses the val dataset config
        pipeline=test_pipeline,
    ),
)



================================================
FILE: configs/neuralbody/nb_zjumocap_377.py
================================================
# NeuralBody config for the ZJU-MoCap CoreView_377 sequence.
_base_ = [
    # '../_base_/models/nerf.py',
    # '../_base_/schedules/adam_20w_iter.py',
    # '../_base_/default_runtime.py'
]
import os
from datetime import datetime

method = 'neuralbody'

# optimizer
optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
lr_rate = 5e-4
max_iters = 2000000
evalute_config = dict()
lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False)
checkpoint_config = dict(interval=10000, by_epoch=False)
log_level = 'INFO'
log_config = dict(interval=10000,
                  by_epoch=False,
                  hooks=[dict(type='TextLoggerHook')])
workflow = [('train', 10000), ('val', 1)]

# hooks
# 'params' are numeric type value, 'variables' are variables in local environment
train_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='valset')),
    dict(type='ValidateHook',
         params=dict(save_folder='visualizations/validation')),
    dict(type='PassIterHook',
         params=dict()),  # pass the current iter number to the dataset
    dict(type='OccupationHook',
         params=dict()),  # not needed for the open-source version
]
test_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='testset')),
    dict(type='TestHook', params=dict()),
]

# runner
train_runner = dict(type='NerfTrainRunner')
test_runner = dict(type='NerfTestRunner')

# runtime settings
num_gpus = 1
distributed = (num_gpus > 1)  # multi-gpu or not; mmcv supports DataParallel poorly, so use a single gpu or DDP
work_dir = './work_dirs/neuralbody/zjumocap_377/'  # noqa
timestamp = datetime.now().strftime('%d-%b-%H-%M')

# shared params by model and data and ...
dataset_type = 'blender'
no_batching = True  # only take random rays from 1 image at a time
no_ndc = True  # the original NeRF code sets no_ndc when "if args.dataset_type != 'llff' or args.no_ndc:"
white_bkgd = False  # set to render synthetic data on a white bkgd (always use for dvoxels)
is_perturb = True  # set to 0. for no jitter, 1. for jitter
use_viewdirs = True  # use full 5D input instead of 3D
N_rand_per_sampler = 1024 * 1  # how many N_rand in get_item() function
lindisp = False  # sampling linearly in disparity rather than depth
N_samples = 64  # number of coarse samples per ray

# resume_from = os.path.join(work_dir, 'latest.pth')
load_from = os.path.join(work_dir, 'latest.pth')

num_train_frame = 300  # number of frames used for training

model = dict(
    type='NeuralBodyNetwork',
    cfg=dict(
        raw_noise_std=0,  # std dev of noise added to regularize sigma_a output, 1e0 recommended
        white_bkgd=white_bkgd,  # set to render synthetic data on a white bkgd (always use for dvoxels)
        use_viewdirs=use_viewdirs,
        is_perturb=is_perturb,
        chunk=1024 * 4,  # mainly work for val
        smpl_embedder=dict(
            type='SmplEmbedder',
            voxel_size=[0.005, 0.005, 0.005],
        ),
        num_train_frame=num_train_frame,
        nerf_mlp=dict(
            type='NB_NeRFMLP',
            num_frame=num_train_frame,
            embedder=dict(
                type='BaseEmbedder',
                i_embed=0,  # set 0 for default positional encoding, -1 for none
                multires=10,  # log2 of max freq for positional encoding (3D location)
                multires_dirs=4,  # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction)
            )),
        bs_data='rays_o',  # the data's shape indicates the real batch-size, this's also the num of rays
    ),
    render=dict(  # render model
        type='NerfRender',
    ),
)

# Map an image path to its SMPL index / frame index.
# Strips the 4-char extension, i.e. image names are plain frame numbers here
# (unlike CoreView_313/315, which encode the index in an '_'-separated field).
img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4])
img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4])

frame_interval = 1
val_frame_interval = 30

basedata_cfg = dict(
    dataset_type=dataset_type,
    datadir='data/zju_mocap/CoreView_377',
    smpl_vertices_dir='new_vertices',
    smpl_params_dir='new_params',
    ratio=0.5,  # reduce the image resolution by ratio
    unit=1000.,
    training_view=[0, 6, 12, 18],
    test_view=[
        1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22
    ],
    num_train_frame=num_train_frame,
    training_frame=[0, num_train_frame * frame_interval
                    ],  # [begin_frame, end_frame]
    frame_interval=frame_interval,
    val_frame_interval=val_frame_interval,
    white_bkgd=white_bkgd,
    mode='train',
    img_path_to_smpl_idx=img_path_to_smpl_idx,
    img_path_to_frame_idx=img_path_to_frame_idx,
)
traindata_cfg = basedata_cfg.copy()
valdata_cfg = basedata_cfg.copy()
traindata_cfg.update(dict())
valdata_cfg.update(dict(mode='val'))

train_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),  # unlike a batching-style dataset, rays must be generated from the pose
    dict(type='NBSelectRays', enable=True,
         sel_n=N_rand_per_sampler),  # sample N rays
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=is_perturb),
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

test_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),
    dict(type='NBSelectRays', enable=True,
         sel_all=True),  # select rays (here: all of them)
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=False),  # no jitter at test time
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

data = dict(
    train_loader=dict(batch_size=1, num_workers=0),
    train=dict(
        type='NeuralBodyDataset',
        cfg=traindata_cfg,
        pipeline=train_pipeline,
    ),
    val_loader=dict(batch_size=1, num_workers=0),
    val=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,
        pipeline=test_pipeline,
    ),
    test_loader=dict(batch_size=1, num_workers=0),
    test=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,  # NOTE: test reuses the val dataset config
        pipeline=test_pipeline,
    ),
)



================================================
FILE: configs/neuralbody/nb_zjumocap_386.py
================================================
# NeuralBody config for the ZJU-MoCap CoreView_386 sequence.
_base_ = [
    # '../_base_/models/nerf.py',
    # '../_base_/schedules/adam_20w_iter.py',
    # '../_base_/default_runtime.py'
]
import os
from datetime import datetime

method = 'neuralbody'

# optimizer
optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
lr_rate = 5e-4
max_iters = 2000000
evalute_config = dict()
lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False)
checkpoint_config = dict(interval=10000, by_epoch=False)
log_level = 'INFO'
log_config = dict(interval=10000,
                  by_epoch=False,
                  hooks=[dict(type='TextLoggerHook')])
workflow = [('train', 10000), ('val', 1)]

# hooks
# 'params' are numeric type value, 'variables' are variables in local environment
train_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='valset')),
    dict(type='ValidateHook',
         params=dict(save_folder='visualizations/validation')),
    dict(type='PassIterHook',
         params=dict()),  # pass the current iter number to the dataset
    dict(type='OccupationHook',
         params=dict()),  # not needed for the open-source version
]
test_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='testset')),
    dict(type='TestHook', params=dict()),
]

# runner
train_runner = dict(type='NerfTrainRunner')
test_runner = dict(type='NerfTestRunner')

# runtime settings
num_gpus = 1
distributed = (num_gpus > 1)  # multi-gpu or not; mmcv supports DataParallel poorly, so use a single gpu or DDP
work_dir = './work_dirs/neuralbody/zjumocap_386/'  # noqa
timestamp = datetime.now().strftime('%d-%b-%H-%M')

# shared params by model and data and ...
dataset_type = 'blender'
no_batching = True  # only take random rays from 1 image at a time
no_ndc = True  # the original NeRF code sets no_ndc when "if args.dataset_type != 'llff' or args.no_ndc:"
white_bkgd = False  # set to render synthetic data on a white bkgd (always use for dvoxels)
is_perturb = True  # set to 0. for no jitter, 1. for jitter
use_viewdirs = True  # use full 5D input instead of 3D
N_rand_per_sampler = 1024 * 1  # how many N_rand in get_item() function
lindisp = False  # sampling linearly in disparity rather than depth
N_samples = 64  # number of coarse samples per ray

# resume_from = os.path.join(work_dir, 'latest.pth')
load_from = os.path.join(work_dir, 'latest.pth')

num_train_frame = 300  # number of frames used for training

model = dict(
    type='NeuralBodyNetwork',
    cfg=dict(
        raw_noise_std=0,  # std dev of noise added to regularize sigma_a output, 1e0 recommended
        white_bkgd=white_bkgd,  # set to render synthetic data on a white bkgd (always use for dvoxels)
        use_viewdirs=use_viewdirs,
        is_perturb=is_perturb,
        chunk=1024 * 4,  # mainly work for val
        smpl_embedder=dict(
            type='SmplEmbedder',
            voxel_size=[0.005, 0.005, 0.005],
        ),
        num_train_frame=num_train_frame,
        nerf_mlp=dict(
            type='NB_NeRFMLP',
            num_frame=num_train_frame,
            embedder=dict(
                type='BaseEmbedder',
                i_embed=0,  # set 0 for default positional encoding, -1 for none
                multires=10,  # log2 of max freq for positional encoding (3D location)
                multires_dirs=4,  # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction)
            )),
        bs_data='rays_o',  # the data's shape indicates the real batch-size, this's also the num of rays
    ),
    render=dict(  # render model
        type='NerfRender',
    ),
)

# Map an image path to its SMPL index / frame index (file name minus extension).
img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4])
img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4])

frame_interval = 1
val_frame_interval = 30

basedata_cfg = dict(
    dataset_type=dataset_type,
    datadir='data/zju_mocap/CoreView_386',
    smpl_vertices_dir='new_vertices',
    smpl_params_dir='new_params',
    ratio=0.5,  # reduce the image resolution by ratio
    unit=1000.,
    training_view=[0, 6, 12, 18],
    test_view=[
        1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22
    ],
    num_train_frame=num_train_frame,
    training_frame=[0, num_train_frame * frame_interval
                    ],  # [begin_frame, end_frame]
    frame_interval=frame_interval,
    val_frame_interval=val_frame_interval,
    white_bkgd=white_bkgd,
    mode='train',
    img_path_to_smpl_idx=img_path_to_smpl_idx,
    img_path_to_frame_idx=img_path_to_frame_idx,
)
traindata_cfg = basedata_cfg.copy()
valdata_cfg = basedata_cfg.copy()
traindata_cfg.update(dict())
valdata_cfg.update(dict(mode='val'))

train_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),  # unlike a batching-style dataset, rays must be generated from the pose
    dict(type='NBSelectRays', enable=True,
         sel_n=N_rand_per_sampler),  # sample N rays
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=is_perturb),
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

test_pipeline = [
    dict(
        type='LoadImageAndCamera',
        enable=True,
    ),  # load the image and the camera parameters
    dict(
        type='LoadSmplParam',
        enable=True,
    ),  # load the SMPL parameters
    dict(
        type='NBGetRays',
        enable=True,
    ),
    dict(type='NBSelectRays', enable=True,
         sel_all=True),  # select rays (here: all of them)
    dict(
        type='ToTensor',
        enable=True,
        keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'],
    ),
    dict(type='GetZvals', enable=True, lindisp=lindisp,
         N_samples=N_samples),  # N_samples: number of coarse samples per ray
    dict(type='PerturbZvals', enable=False),  # no jitter at test time
    dict(type='GetPts', enable=True),
    dict(type='DeleteUseless',
         enable=True,
         keys=[
             'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx',
             'img_path', 'num_cams'
         ]),
]

data = dict(
    train_loader=dict(batch_size=1, num_workers=0),
    train=dict(
        type='NeuralBodyDataset',
        cfg=traindata_cfg,
        pipeline=train_pipeline,
    ),
    val_loader=dict(batch_size=1, num_workers=0),
    val=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,
        pipeline=test_pipeline,
    ),
    test_loader=dict(batch_size=1, num_workers=0),
    test=dict(
        type='NeuralBodyDataset',
        cfg=valdata_cfg,  # NOTE: test reuses the val dataset config
        pipeline=test_pipeline,
    ),
)



================================================
FILE: configs/neuralbody/nb_zjumocap_387.py
================================================
# NeuralBody config for the ZJU-MoCap CoreView_387 sequence.
_base_ = [
    # '../_base_/models/nerf.py',
    # '../_base_/schedules/adam_20w_iter.py',
    # '../_base_/default_runtime.py'
]
import os
from datetime import datetime

method = 'neuralbody'

# optimizer
optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
lr_rate = 5e-4
max_iters = 2000000
evalute_config = dict()
lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False)
checkpoint_config = dict(interval=10000, by_epoch=False)
log_level = 'INFO'
log_config = dict(interval=10000,
                  by_epoch=False,
                  hooks=[dict(type='TextLoggerHook')])
workflow = [('train', 10000), ('val', 1)]

# hooks
# 'params' are numeric type value, 'variables' are variables in local environment
train_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='valset')),
    dict(type='ValidateHook',
         params=dict(save_folder='visualizations/validation')),
    dict(type='PassIterHook',
         params=dict()),  # pass the current iter number to the dataset
    dict(type='OccupationHook',
         params=dict()),  # not needed for the open-source version
]
test_hooks = [
    dict(type='SetValPipelineHook',
         params=dict(),
         variables=dict(valset='testset')),
    dict(type='TestHook', params=dict()),
]

# runner
train_runner = dict(type='NerfTrainRunner')
test_runner = dict(type='NerfTestRunner')

# runtime settings
num_gpus = 1
distributed = (num_gpus > 1)  # multi-gpu or not; mmcv supports DataParallel poorly, so use a single gpu or DDP
work_dir = './work_dirs/neuralbody/zjumocap_387/'  # noqa
timestamp = datetime.now().strftime('%d-%b-%H-%M')

# shared params by model and data and ...
dataset_type = 'blender'
no_batching = True  # only take random rays from 1 image at a time
no_ndc = True  # the original NeRF code sets no_ndc when "if args.dataset_type != 'llff' or args.no_ndc:"
white_bkgd = False  # set to render synthetic data on a white bkgd (always use for dvoxels)
is_perturb = True  # set to 0. for no jitter, 1. for jitter
use_viewdirs = True  # use full 5D input instead of 3D
N_rand_per_sampler = 1024 * 1  # how many N_rand in get_item() function
lindisp = False  # sampling linearly in disparity rather than depth
N_samples = 64  # number of coarse samples per ray

# resume_from = os.path.join(work_dir, 'latest.pth')
load_from = os.path.join(work_dir, 'latest.pth')

num_train_frame = 300  # number of frames used for training

model = dict(
    type='NeuralBodyNetwork',
    cfg=dict(
        raw_noise_std=0,  # std dev of noise added to regularize sigma_a output, 1e0 recommended
        white_bkgd=white_bkgd,  # set to render synthetic data on a white bkgd (always use for dvoxels)
        use_viewdirs=use_viewdirs,
        is_perturb=is_perturb,
        chunk=1024 * 4,  # mainly work for val
        smpl_embedder=dict(
            type='SmplEmbedder',
            voxel_size=[0.005, 0.005, 0.005],
        ),
        num_train_frame=num_train_frame,
        nerf_mlp=dict(
            type='NB_NeRFMLP',
            num_frame=num_train_frame,
            embedder=dict(
                type='BaseEmbedder',
                i_embed=0,  # set 0 for default positional encoding, -1 for none
                multires=10,  # log2 of max freq for positional encoding (3D location)
                multires_dirs=4,  # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction)
            )),
        bs_data='rays_o',  # the data's shape indicates the real batch-size, this's also the num of rays
    ),
    render=dict(  # render model
        type='NerfRender',
    ),
)

# Map an image path to its SMPL index / frame index (file name minus extension).
img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4])
img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4])

frame_interval = 1
val_frame_interval = 30
basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_387', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22 ], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', 
enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_390.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = 
dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_390/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_frame = 300 model = dict( type='NeuralBodyNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, is_perturb=is_perturb, chunk=1024 * 4, # mainly work for val smpl_embedder=dict( type='SmplEmbedder', voxel_size=[0.005, 0.005, 0.005], ), num_train_frame=num_train_frame, nerf_mlp=dict( type='NB_NeRFMLP', num_frame=num_train_frame, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: 
int(os.path.basename(x)[:-4]) frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_390', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22 ], num_train_frame=num_train_frame, training_frame=[700, 700 + num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # 
N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_392.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner 
train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_392/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_frame = 300 model = dict( type='NeuralBodyNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, is_perturb=is_perturb, chunk=1024 * 4, # mainly work for val smpl_embedder=dict( type='SmplEmbedder', voxel_size=[0.005, 0.005, 0.005], ), num_train_frame=num_train_frame, nerf_mlp=dict( type='NB_NeRFMLP', num_frame=num_train_frame, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: 
int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_392', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22 ], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', 
enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_393.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), 
dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_393/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_frame = 300 model = dict( type='NeuralBodyNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, is_perturb=is_perturb, chunk=1024 * 4, # mainly work for val smpl_embedder=dict( type='SmplEmbedder', voxel_size=[0.005, 0.005, 0.005], ), num_train_frame=num_train_frame, nerf_mlp=dict( type='NB_NeRFMLP', num_frame=num_train_frame, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), render=dict( # render model 
type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_393', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22 ], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 
'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_394.py ================================================ _base_ = [ # '../_base_/models/nerf.py', # '../_base_/schedules/adam_20w_iter.py', # '../_base_/default_runtime.py' ] import os from datetime import datetime method = 'neuralbody' # optimizer optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) lr_rate = 5e-4 max_iters = 2000000 evalute_config = dict() lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) checkpoint_config = dict(interval=10000, by_epoch=False) log_level = 'INFO' log_config = dict(interval=10000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 10000), ('val', 1)] # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', 
params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否多卡,mmcv对dp多卡支持不好,故而要么单卡要么ddp多卡 work_dir = './work_dirs/neuralbody/zjumocap_394/' # noqa timestamp = datetime.now().strftime('%d-%b-%H-%M') # shared params by model and data and ... dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True # 源代码中'if args.dataset_type != 'llff' or args.no_ndc:' 就设置no_ndc white_bkgd = False # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 1 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') load_from = os.path.join(work_dir, 'latest.pth') num_train_frame = 300 model = dict( type='NeuralBodyNetwork', cfg=dict( raw_noise_std= 0, # std dev of noise added to regularize sigma_a output, 1e0 recommended white_bkgd= white_bkgd, # set to render synthetic data on a white bkgd (always use for dvoxels) use_viewdirs=use_viewdirs, is_perturb=is_perturb, chunk=1024 * 4, # mainly work for val smpl_embedder=dict( type='SmplEmbedder', voxel_size=[0.005, 0.005, 0.005], ), num_train_frame=num_train_frame, nerf_mlp=dict( type='NB_NeRFMLP', num_frame=num_train_frame, embedder=dict( type='BaseEmbedder', i_embed=0, # set 0 for default positional encoding, -1 for none multires= 10, # log2 of max freq for positional encoding (3D location) multires_dirs= 4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) )), bs_data= 'rays_o', # the data's shape indicates the real batch-size, this's also the num of 
rays ), render=dict( # render model type='NerfRender', ), ) img_path_to_smpl_idx = lambda x: int(os.path.basename(x)[:-4]) img_path_to_frame_idx = lambda x: int(os.path.basename(x)[:-4]) frame_interval = 1 val_frame_interval = 30 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_394', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=0.5, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[ 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20, 21, 22 ], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) train_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), # 与batching型dataset不同的是, 需要从pose生成rays dict(type='NBSelectRays', enable=True, sel_n=N_rand_per_sampler), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'target_s', 'near', 'far'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] test_pipeline = [ dict( type='LoadImageAndCamera', enable=True, ), # 读取图片和相机参数 dict( type='LoadSmplParam', enable=True, ), # 读取SMPL参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True), # 抽取N个射线 dict( type='ToTensor', enable=True, 
keys=['rays_o', 'rays_d', 'target_s', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'ims', 'cfg', 'data_root', 'idx', 'img_path', 'num_cams' ]), ] data = dict( train_loader=dict(batch_size=1, num_workers=0), train=dict( type='NeuralBodyDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), ) ================================================ FILE: configs/neuralbody/nb_zjumocap_render_313.py ================================================ _base_ = ['nb_zjumocap_313.py'] from configs.neuralbody.nb_zjumocap_313 import * test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='NBSaveSpiralHook', params=dict()), ] ratio = 0.5 basedata_cfg = dict( dataset_type=dataset_type, datadir='data/zju_mocap/CoreView_313', smpl_vertices_dir='new_vertices', smpl_params_dir='new_params', ratio=ratio, # reduce the image resolution by ratio unit=1000., training_view=[0, 6, 12, 18], test_view=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 19, 20], num_train_frame=num_train_frame, training_frame=[0, num_train_frame * frame_interval ], # [begin_frame, end_frame] frame_interval=frame_interval, val_frame_interval=val_frame_interval, white_bkgd=white_bkgd, mode='train', img_path_to_smpl_idx=img_path_to_smpl_idx, img_path_to_frame_idx=img_path_to_frame_idx, ) frame_idx_to_smpl_idx = lambda x: x + 1 frame_idx_to_latent_idx = lambda x: x valdata_cfg = basedata_cfg.copy() valdata_cfg.update( dict(mode='render', num_render_views=50, 
frame_idx=0, frame_idx_to_smpl_idx=frame_idx_to_smpl_idx, frame_idx_to_latent_idx=frame_idx_to_latent_idx, render_H=int(1024 * ratio), render_W=int(1024 * ratio), ratio=ratio)) test_pipeline = [ dict( type='LoadCamAndSmplParam', enable=True, ), # 读取相机和Smpl参数 dict( type='NBGetRays', enable=True, ), dict(type='NBSelectRays', enable=True, sel_all=True, sel_rgb=False), # 抽取N个射线 dict( type='ToTensor', enable=True, keys=['rays_o', 'rays_d', 'near', 'far', 'mask_at_box'], ), dict(type='GetZvals', enable=True, lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=False), dict(type='GetPts', enable=True), dict(type='DeleteUseless', enable=True, keys=[ 'iter_n', 'cams', 'cam_inds', 'cfg', 'data_root', 'idx', 'spiral_poses', 'K' ]), ] data.update( dict(test=dict( type='NeuralBodyDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), )) ================================================ FILE: docker/Dockerfile ================================================ ARG PYTORCH="1.9.0" ARG CUDA="11.1" ARG CUDNN="8" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" RUN rm /etc/apt/sources.list.d/cuda.list RUN rm /etc/apt/sources.list.d/nvidia-ml.list RUN apt-key del 7fa2af80 # RUN apt-get update && apt-get install -y --no-install-recommends wget --assume-yes apt-utils # RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb # RUN dpkg -i cuda-keyring_1.0-1_all.deb RUN apt-get update && \ apt-get install git ninja-build ffmpeg libsm6 libxext6 vim -y -f && \ apt-get install build-essential -y && \ apt-get install wget -y && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* # Install torch1.10 and mmcv-full RUN wget https://download.pytorch.org/whl/cu111/torch-1.10.0%2Bcu111-cp37-cp37m-linux_x86_64.whl RUN pip install 
torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl && \ pip cache purge && rm torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl RUN pip install opencv-python>=3 yapf imageio scikit-image && \ pip cache purge RUN pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.10.0/index.html && \ pip cache purge RUN pip install coverage pytest && \ pip cache purge # Install neural-body needed pkgs RUN pip install spconv-cu111 && \ pip cache purge RUN pip install lpips trimesh matplotlib smplx && \ pip cache purge RUN git clone https://github.com/facebookresearch/pytorch3d.git RUN cd pytorch3d && pip install -e . && \ pip cache purge # Install tcnn RUN git clone --recursive https://github.com/nvlabs/tiny-cuda-nn # below may meet error, because 'docker build' runs without gpus by default # https://stackoverflow.com/questions/59691207/docker-build-with-nvidia-runtime RUN cd tiny-cuda-nn/bindings/torch && python setup.py install # Install xrnerf extension RUN git clone https://github.com/openxrlab/xrnerf.git RUN cd xrnerf/extensions/mesh_grid && python setup.py install RUN cd xrnerf/extensions/ngp_raymarch && python setup.py build_ext --inplace && python setup.py install # Verification RUN cd xrnerf && coverage run --source xrnerf/models -m pytest -s test/models && coverage report -m ================================================ FILE: docker/DockerfileCN ================================================ ARG PYTORCH="1.9.0" ARG CUDA="11.1" ARG CUDNN="8" FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel # ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" RUN rm /etc/apt/sources.list.d/cuda.list RUN rm /etc/apt/sources.list.d/nvidia-ml.list RUN apt-key del 7fa2af80 ADD docker/sources.list /etc/apt/ # RUN apt-get update && apt-get install -y --no-install-recommends wget --assume-yes apt-utils # RUN wget 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb # RUN dpkg -i cuda-keyring_1.0-1_all.deb RUN apt-get update && \ apt-get install git ninja-build ffmpeg libsm6 libxext6 vim -y -f && \ apt-get install build-essential -y && \ apt-get install wget -y && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* # Install torch1.10 and mmcv-full RUN wget https://download.pytorch.org/whl/cu111/torch-1.10.0%2Bcu111-cp37-cp37m-linux_x86_64.whl RUN pip install torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl -i https://pypi.tuna.tsinghua.edu.cn/simple && \ pip cache purge && rm torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl RUN pip install opencv-python>=3 yapf imageio scikit-image -i https://pypi.doubanio.com/simple && \ pip cache purge RUN pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.10.0/index.html && \ pip cache purge RUN pip install coverage pytest -i https://pypi.tuna.tsinghua.edu.cn/simple && \ pip cache purge # Install neural-body needed pkgs RUN pip install spconv-cu111 -i https://pypi.tuna.tsinghua.edu.cn/simple && \ pip cache purge RUN pip install lpips trimesh matplotlib smplx -i https://pypi.tuna.tsinghua.edu.cn/simple && \ pip cache purge RUN git clone https://gitclone.com/github.com/facebookresearch/pytorch3d.git RUN cd pytorch3d && pip install -e . 
-i https://pypi.tuna.tsinghua.edu.cn/simple && \ pip cache purge # Install tcnn # (If meet network problem, commented below out, download & install manually) RUN git clone https://gitclone.com/github.com/nvlabs/tiny-cuda-nn RUN cd tiny-cuda-nn/dependencies && git clone https://gitclone.com/github.com/fmtlib/fmt.git RUN cd tiny-cuda-nn/dependencies && git clone https://gitclone.com/github.com/NVIDIA/cutlass.git RUN cd tiny-cuda-nn/bindings/torch && python setup.py install # gitclone收录日,家祭无忘告乃翁 # 0907 update:已收录,以下取消注释 RUN git clone https://gitclone.com/github.com/openxrlab/xrnerf.git RUN cd xrnerf/extensions/mesh_grid && python setup.py install RUN cd xrnerf/extensions/ngp_raymarch && python setup.py build_ext --inplace && python setup.py install # 运行ut验证安装 RUN cd xrnerf && coverage run --source xrnerf/models -m pytest -s test/models && coverage report -m ================================================ FILE: docker/daemon.json ================================================ { "runtimes": { "nvidia": { "path": "/usr/bin/nvidia-container-runtime", "runtimeArgs": [] } }, "default-runtime": "nvidia" } ================================================ FILE: docker/sources.list ================================================ deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ 
bionic-proposed main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse ================================================ FILE: docs/en/CONTRIBUTING.md ================================================ # Contributing to XRNeRF All kinds of contributions are welcome, including but not limited to the following. - Fixes (typo, bugs) - New features and components ## Workflow 1. Fork and pull the latest xrnerf 1. Checkout a new branch with a meaningful name (do not use master branch for PRs) 1. Commit your changes 1. Create a PR ```{note} - If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. - If you are the author of some papers and would like to include your method in xrnerf, please contact us. We would greatly appreciate your contribution. ``` ## Code style ### Python We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. We use the following tools for linting and formatting: - [flake8](http://flake8.pycqa.org/en/latest/): linter - [yapf](https://github.com/google/yapf): formatter - [isort](https://github.com/timothycrosley/isort): sort imports Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg). We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, fixes `end-of-files`, sorts `requirements.txt` automatically on every commit. The config for a pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml). After you clone the repository, you will need to install and initialize the pre-commit hook.
``` pip install -U pre-commit ``` From the repository folder ``` pre-commit install ``` If you are facing an issue when installing markdown lint, you may install ruby for markdown lint by referring to [this repo](https://github.com/innerlee/setup) by following the usage and taking [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh) or by the following steps ```shell # install rvm curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail rvm autolibs disable # install ruby rvm install 2.7.1 ``` After this on every commit check code linters and formatter will be enforced. > Before you create a PR, make sure that your code lints and is formatted by yapf. ### C++ and CUDA We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). ================================================ FILE: docs/en/additional_licenses.md ================================================ # Additional Licenses We would like to pay tribute to open-source implementations to which we make reference. Note that they may carry additional license requirements. ## instant-ngp Copyright (c) 2022, NVIDIA Corporation & affiliates. All rights reserved. NVIDIA Source Code License for instant neural graphics primitives ======================================================================= 1. Definitions "Licensor" means any person or entity that distributes its Work. "Software" means the original work of authorship made available under this License. "Work" means the Software and any additions to or derivative works of the Software that are made available under this License. The terms "reproduce," "reproduction," "derivative works," and "distribution" have the meaning as provided under U.S. copyright law; provided, however, that for the purposes of this License, derivative works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work.
Works, including the Software, are "made available" under this License by including in or with the Work either (a) a copyright notice referencing the applicability of this License to the Work, or (b) a copy of this License. 2. License Grants 2.1 Copyright Grant. Subject to the terms and conditions of this License, each Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense and distribute its Work and any resulting derivative works in any form. 3. Limitations 3.1 Redistribution. You may reproduce or distribute the Work only if (a) you do so under this License, (b) you include a complete copy of this License with your distribution, and (c) you retain without modification any copyright, patent, trademark, or attribution notices that are present in the Work. 3.2 Derivative Works. You may specify that additional or different terms apply to the use, reproduction, and distribution of your derivative works of the Work ("Your Terms") only if (a) Your Terms provide that the use limitation in Section 3.3 applies to your derivative works, and (b) you identify the specific derivative works that are subject to Your Terms. Notwithstanding Your Terms, this License (including the redistribution requirements in Section 3.1) will continue to apply to the Work itself. 3.3 Use Limitation. The Work and any derivative works thereof only may be used or intended for use non-commercially. Notwithstanding the foregoing, NVIDIA and its affiliates may use the Work and any derivative works commercially. As used herein, "non-commercially" means for research or evaluation purposes only. 3.4 Patent Claims. 
If you bring or threaten to bring a patent claim against any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) to enforce any patents that you allege are infringed by any Work, then your rights under this License from such Licensor (including the grant in Section 2.1) will terminate immediately. 3.5 Trademarks. This License does not grant any rights to use any Licensor's or its affiliates' names, logos, or trademarks, except as necessary to reproduce the notices described in this License. 3.6 Termination. If you violate any term of this License, then your rights under this License (including the grant in Section 2.1) will terminate immediately. 4. Disclaimer of Warranty. THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE. 5. Limitation of Liability. EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
======================================================================= ================================================ FILE: docs/en/apis.md ================================================ # APIS ## run_nerf input: args, running parameters purpose: parse running parameters, and train, test or render a nerf model according to specified parameters ## train_nerf input: cfg, mmcv.Config purpose: parse running parameters, train a nerf model according to specified parameters ## test_nerf input: cfg, mmcv.Config purpose: parse running parameters, test or render a nerf model according to specified parameters ## parse_args input: args, running parameters purpose: parse running parameters, convert to a mmcv.Config ================================================ FILE: docs/en/benchmark.md ================================================ # Benchmark We compare our results with some popular frameworks and official releases in terms of speed. ## Settings ### Software Environment - Python 3.7 - PyTorch 1.10 - CUDA 11.1 - CUDNN 8.1.0 ## Main Results ### SceneNeRF #### NeRF
test data PSNR SSIM
NeRF XRNeRF NeRF XRNeRF
blender_chair 33.927 34.528 0.967 0.985
blender_drums 25.600 25.685 0.925 0.946
blender_ficus 30.13 29.300 0.964 0.972
blender_hotdog 36.18 35.905 0.974 0.985
blender_materials 29.62 29.014 0.949 0.967
blender_mic 32.58 32.95 0.980 0.986
blender_ship 28.65 29.46 0.856 0.932
llff_fern 25.17 26.277 0.792 0.892
llff_flower 27.40 26.592 0.827 0.884
llff_fortress 31.16 31.485 0.881 0.952
llff_horns 27.45 26.162 0.828 0.895
llff_leaves 20.92 19.749 0.690 0.668
#### Kilo-NeRF
test data PSNR SSIM elapsed_time(ms)
KiloNeRF XRNeRF KiloNerf XRNeRF KiloNerf XRNeRF
nsvf_Synthetic_NeRF_chair 33.044 33.037 0.971 0.979 384.98 407.78
nsvf_Synthetic_NeRF_drums 25.327 25.308 0.931 0.949 413.03 353.62
nsvf_Synthetic_NeRF_ficus 30.1 30.176 0.967 0.975 351.04 337.22
nsvf_Synthetic_NeRF_hotdog 32.316 33.408 0.974 0.986 484.22 491.49
nsvf_Synthetic_NeRF_lego 33.398 33.381 0.971 0.982 379.1 365.16
nsvf_Synthetic_NeRF_materials 29.193 29.175 0.951 0.966 380.28 358.57
nsvf_Synthetic_NeRF_mic 33.186 33.346 0.982 0.987 370.31 346.71
nsvf_Synthetic_NeRF_ship 28.892 29.295 0.874 0.933 491.92 488.35
Average 30.68 30.89102 0.9526 0.9697 406.86 393.61
#### Mip-NeRF
MultiScale Blender PSNR
800x800 400x400 200x200 100x100
Jax XRNeRF Jax XRNeRF Jax XRNeRF Jax XRNeRF
blender_ship 29.599 28.522 31.955 30.754 33.845 32.848 34.868 33.754
blender_mic 33.739 32.478 36.353 35.008 38.837 37.958 39.011 38.064
blender_materials 30.128 29.278 31.424 30.505 33.163 32.192 34.174 33.122
blender_lego 33.971 32.803 35.248 34.123 35.796 34.848 35.223 34.382
blender_hotdog 36.457 35.803 38.382 37.631 39.831 39.096 39.935 39.038
blender_ficus 31.490 29.222 32.267 30.093 33.255 31.655 33.606 31.785
blender_drums 25.297 24.790 26.463 26.020 27.808 27.510 28.791 28.369
blender_chair 33.351 32.429 36.517 35.618 38.056 37.342 37.950 37.257
Average 31.754 30.666 33.576 32.469 35.074 34.181 35.445 34.472
#### InstantNGP
test data PSNR
InstantNGP XRNeRF
blender_chair 32.927 32.71
blender_drums 26.02 26.9
blender_ficus 33.51 33.97
blender_hotdog 37.40 37.17
blender_lego 36.39 35.1
blender_materials 29.78 30.73
blender_mic 36.22 34.05
blender_ship 31.1 30.0
average 32.92 32.58
### HumanNeRF #### Neural Body
test data PSNR SSIM
Neural Body XRNeRF Neural Body XRNeRF
313 35.21 37.76 0.985 0.993
315 33.07 35.99 0.988 0.992
377 33.86 33.86 0.985 0.986
386 36.07 34.24 0.984 0.984
387 31.39 31.99 0.975 0.979
390 34.48 35.45 0.980 0.984
392 35.76 35.11 0.984 0.986
393 33.24 33.50 0.979 0.985
394 34.31 35.61 0.980 0.984
#### Animatable NeRF
test data (Novel pose) PSNR SSIM
Animatable NeRF XRNeRF Animatable NeRF XRNeRF
S1 30.11 31.98 0.981 0.984
S5 32.60 33.25 0.987 0.990
S6 29.49 30.12 0.972 0.974
S7 31.54 34.47 0.984 0.988
S8 30.77 32.01 0.983 0.985
S9 31.94 28.61 0.980 0.976
S11 33.12 33.43 0.986 0.986
#### GNR
test data PSNR SSIM
GNR XRNeRF GNR XRNeRF
amanda 23.62 25.35 0.93 0.95
barry 29.28 30.71 0.94 0.95
fuzhizhi 21.96 21.42 0.90 0.89
jinyutong 23.90 24.08 0.90 0.91
joseph 26.30 24.46 0.94 0.92
maria 21.51 23.69 0.90 0.90
mahaoran 28.41 30.93 0.93 0.94
natacha 28.71 27.98 0.91 0.91
soufianou 27.64 28.83 0.93 0.93
zhuna 25.40 24.32 0.93 0.92
================================================ FILE: docs/en/dataset_preparation.md ================================================ # Data Preparation We provide some tips for XRNeRF data preparation in this file. - [Data Preparation](#data-preparation) - [Getting Data](#getting-data) - [Dataset Organization](#dataset-organization) - [Dataset Download](#dataset-download) ## Getting Data #### Dataset Organization It is recommended to symlink the dataset root to $PROJECT/data. If your folder structure is different, you may need to change the corresponding paths in config files. ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... ``` #### Dataset Download 1. Download ```nerf_synthetic``` and ```nerf_llff_data``` from [here](https://drive.google.com/drive/folders/128yBriW1IG_3NJ5Rp7APSTZsJqdJdfc1), and put it under ```xrnerf/data``` 2. Credit to NSVF authors for providing [their datasets](https://github.com/facebookresearch/NSVF), read introductions [here](https://github.com/creiser/kilonerf#download-nsvf-datasets) 3. For mip-nerf training, you can generate the multiscale dataset used in the paper by running the following command, ```python tools/convert_blender_data.py --blenderdir /data/nerf_synthetic --outdir data/multiscale``` 4. For the training of NeuralBody, please download the dataset from [here](https://github.com/zju3dv/neuralbody/blob/master/INSTALL.md#zju-mocap-dataset). 5. For the training of Animatable NeRF, please download the dataset from [here](https://github.com/zju3dv/animatable_nerf/blob/master/INSTALL.md#human36m-dataset). 6. For the training of GNR, please download the dataset from [here](https://generalizable-neural-performer.github.io/genebody.html). 7. For the training of BungeeNeRF, please download the dataset from [here](https://drive.google.com/drive/folders/1ybq-BuRH0EEpcp5OZT9xEMi-Px1pdx4D?usp=sharing). 
================================================ FILE: docs/en/faq.md ================================================ # FAQ ## Outline We list some common issues faced by many users and their corresponding solutions here. - [FAQ](#faq) - [Outline](#outline) - [Installation](#installation) - [Data](#data) - [Training](#training) - [Testing](#testing) - [Deploying](#deploying) Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. ## Installation - **"No module named 'mmcv'"** 1. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation) - **"No module named 'raymarch'"** 1. Change workdir to extensions' directory using `cd extensions/ngp_raymarch` 2. Compile cuda extensions using `rm -rf build && clear && python setup.py build_ext --inplace` 3. Install cuda extensions using `python setup.py install` ================================================ FILE: docs/en/get_started.md ================================================ # Getting Started This page provides basic tutorials about the usage of XRNeRF. For installation instructions, please see [installation.md](installation.md). - [Getting Started](#getting-started) - [Datasets](#datasets) - [Build a Model](#build-a-model) - [Basic Concepts](#basic-concepts) - [Write a new network](#write-a-new-network) - [Installation](#installation) - [Train a Model](#train-a-model) - [Iteration Controls](#iteration-controls) - [Train](#train) - [Test](#test) - [Tutorials](#tutorials) - [Other Documents](#other-documents) ## Datasets It is recommended to symlink the dataset root to `$PROJECT/data`. If your folder structure is different, you may need to change the corresponding paths in config files. ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... 
``` For more information on data preparation, please see [dataset_preparation.md](dataset_preparation.md) ## Build a Model ### Basic Concepts In XRNeRF, model components are basically categorized as 4 types. - network: the whole nerf model pipeline, usually contains a embedder, mlp and render. - embedder: convert point-position and viewdirection data into embedded data, embedder can be function only or with trainable paramters. - mlp: use the output of embedder as input, and output raw data (the rgb and density value at sampled position) for render, usually contains FC layers. - render: receive mlp's raw data, output the rgb value at a pixel. Following some basic pipelines (e.g., `NerfNetwork`), the model structure can be customized through config files with no pains. ### Write a new network To write a new nerf network, you need to inherit from `BaseNerfNetwork`, which defines the following abstract methods. - `train_step()`: forward method of the training mode. - `val_step()`: forward method of the testing mode. [NerfNetwork](../../xrnerf/models/networks/nerf.py) is a good example which show how to do that. To be specific, if we want to implement some new components, there are several things to do. 1. create a new file in `xrnerf/models/networks/my_networks.py`. ```python from ..builder import NETWORKS from .nerf import NerfNetwork @NETWORKS.register_module() class MyNerfNetwork(NerfNetwork): def __init__(self, cfg, mlp=None, mlp_fine=None, render=None): super().__init__(cfg, mlp, mlp_fine, render) def forward(self, data): .... def train_step(self, data, optimizer, **kwargs): .... def val_step(self, data, optimizer=None, **kwargs): .... ``` 2. Import the module in `xrnerf/models/networks/__init__.py` ```python from .my_networks import MyNerfNetwork ``` 3. modify the [config file](../../configs/nerf/nerf_blender_base01.py) from ```python model = dict( type='NerfNetwork', .... ``` to ```python model = dict( type='MyNerfNetwork', .... 
``` To implement some new components for embedder/mlp/render, procedure is similar to above. * To write a new nerf embedder, you need to inherit from `nn.Module` or `BaseEmbedder`, and define the `forward` method. [BaseEmbedder](../../xrnerf/models/embedders/base.py) is a good example. * To write a new nerf mlp, you need to inherit from `nn.Module` or `BaseMLP`, and define the `forward` method. [NerfMLP](../../xrnerf/models/mlps/nerf_mlp.py) is a good example. * To write a new nerf render, you need to inherit from `nn.Module` or `BaseRender`, and define the `forward` method. [NerfRender](../../xrnerf/models/renders/nerf_render.py) is a good example. ## Installation We provide detailed [installation tutorial](installation.md) for xrnerf, users can install from scratch or use provided [dockerfile](../../docker/Dockerfile). It is recommended to start by creating a docker image: ```shell docker build -f ./docker/Dockerfile --rm -t xrnerf . ``` For more information, please follow our [installation tutorial](installation.md). ## Train a Model ### Iteration Controls XRnerf use `mmcv.runner.IterBasedRunner` to control training, and `mmcv.runner.EpochBasedRunner` to for test mode. In training mode, the `max_iters` in config file decide how many iters. In test mode, `max_iters` is forced to change to 1, which represents only 1 epoch to test. ### Train ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego ``` Arguments are: - `--config`: config file path. - `--dataname`: select which data under dataset directory. ### Test We have provided model ```iter_200000.pth``` for test, download from [here](https://drive.google.com/file/d/147wRy3TFlRVrZdWqAgHNak7s6jiMZA1-/view?usp=sharing) ```shell python run_nerf.py --config configs/nerf/nerf_blender_base01.py --dataname lego --test_only --load_from iter_200000.pth ``` Arguments are: - `--config`: config file path. - `--dataname`: select which data under dataset directory. 
- `--test_only`: run inference on the whole testset once. - `--load_from`: load which checkpoint to test, this will overwrite the original `load_from` in config file for convenience. ## Tutorials Currently, we provide some tutorials for users to * [learn about configs](tutorials/config.md) * [customize data pipelines](tutorials/data_pipeline.md) * [model define](tutorials/model.md) ## Other Documents Besides that, the document also includes the following * [api](apis.md) * [dataset_preparation](dataset_preparation.md) * [installation](installation.md) ================================================ FILE: docs/en/installation.md ================================================ # Installation We provide some tips for XRNeRF installation in this file. - [Installation](#installation) - [Requirements](#requirements) - [Prepare environment](#prepare-environment) - [a. Install development libs.](#a-install-development-libs) - [b. Create a conda virtual environment and activate it.](#b-create-a-conda-virtual-environment-and-activate-it) - [c. Install PyTorch and torchvision](#c-install-pytorch-and-torchvision) - [d. Install Other Needed Python Packages](#d-install-other-needed-python-packages) - [e. Install Extensions](#e-install-extensions) - [d. Download smpl_t_pose to surport GNR](#d-download-smpl_t_pose-to-surport-gnr) - [Another option: Docker Image](#another-option-docker-image) - [a. Build an Image](#a-build-an-image) - [b.
Create a Container](#b-create-a-container) - [Verification](#verification) ## Requirements - Linux - Python 3.7+ - **PyTorch 1.10+ (necessary)** - **CUDA 11.0+ (necessary)** - GCC 7.5+ - build-essential: Install by `apt-get install -y build-essential git ninja-build ffmpeg libsm6 libxext6 libgl1` - [mmcv-full](https://github.com/open-mmlab/mmcv) - Numpy - ffmpeg (4.2 is preferred) - [opencv-python 3+](https://github.com/dmlc/decord): Install by `pip install opencv-python>=3` - [imageio](https://github.com/dmlc/decord): Install by `pip install imageio` - [scikit-image](https://github.com/dmlc/decord): Install by `pip install scikit-image` - [lpips](https://github.com/richzhang/PerceptualSimilarity): Install by `pip install lpips` - [trimesh](https://github.com/mikedh/trimesh): Install by `pip install trimesh` - [smplx](https://github.com/vchoutas/smplx): Install by `pip install smplx` - [spconv](https://github.com/dmlc/decord): Install proper vision that matches your cuda-vision, for example `pip install spconv-cu113` - [pytorch3d](https://github.com/dmlc/decord): Install by `pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"` About hardware requirements: Instant-NGP need GPU-ARCH>=75, which means that at least a RTX 20X0 is required to have a full support. | RTX 30X0 | A100 | RTX 20X0 | TITAN V / V100 | GTX 10X0 / TITAN Xp | GTX 9X0 | K80 | |:--------:|:----:|:--------:|:--------------:|:-------------------:|:-------:|:---:| | 86 | 80 | 75 | 70 | 61 | 52 | 37 | If you don't need instant-ngp, [spconv](https://github.com/traveller59/spconv#spconv-spatially-sparse-convolution-library) depends the minimum cuda version. So at least cuda 10.2 is needed. ## Prepare environment #### a. Install development libs. ```shell sudo apt install libgl-dev freeglut3-dev build-essential git ninja-build ffmpeg libsm6 libxext6 libgl1 ``` #### b. Create a conda virtual environment and activate it. 
```shell conda create -n xrnerf python=3.7 -y conda activate xrnerf ``` #### c. Install PyTorch and torchvision 1. check the pytorch-cuda version match table from [here](https://pytorch.org/get-started/previous-versions/) or [here](https://blog.csdn.net/weixin_42069606/article/details/105198845) 2. find a proper torch version (>=1.10.0 and matching your cuda version) from [here](https://download.pytorch.org/whl/torch_stable.html), like ```cu111/torch-1.10.0%2Bcu111-cp37-cp37m-linux_x86_64.whl```, download the whl file 3. install your whl file, for example ```pip install torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl``` 4. check [here](https://pypi.org/project/torchvision/) and install the specified version of torchvision, for example ```pip install torchvision==0.12.0``` #### d. Install Other Needed Python Packages * you can use ```pip install -r requirements.txt``` to install most of the needed pkgs. If this step succeeds, you should jump to ```kilo-cuda``` and ```spconv``` step to install them manually. Or you can skip this step and follow the installation steps below * ```pip install 'opencv-python>=3' yapf imageio scikit-image lpips trimesh smplx``` * install ```mmcv-full``` following their [Installation](https://mmcv.readthedocs.io/en/latest/get_started/installation.html) * install ```spconv``` using pip install, for example ```pip install spconv-cu111```.
notice that only specified cuda-vision are supported, following their [Installation](https://github.com/traveller59/spconv) * install ```pytorch3d``` using ```pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"``` * install ```kilo-cuda``` following their [Installation](https://github.com/creiser/kilonerf#option-b-build-cuda-extension-yourself)(optional, only needed for kilo-nerf) * install ```tcnn``` using ```pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch```, or following their [Installation](https://github.com/NVlabs/tiny-cuda-nn#pytorch-extension)(optional, only needed for instant-ngp) #### e. Install Extensions * build cuda-extension ```raymarch``` for instant-ngp supported, following [ngp_raymarch](../../extensions/ngp_raymarch/README.md) * build cuda-extension ```mesh_grid``` for gnr supported, following [mesh_grid](../../extensions/mesh_grid/README.md) #### d. Download smpl_t_pose to surport GNR * In order to support the ```GNR``` algorithm, you need to download the ```smpl_t_pose``` folder from [GNR](https://github.com/generalizable-neural-performer/gnr), and modify ```basedata_cfg.t_pose_path``` in ```configs/gnr/gnr_genebody.py``` to the corresponding storage location ## Another option: Docker Image You need to set docker daemon, to enable docker-build's gpu support (for cuda extension install). ```shell sudo apt-get install nvidia-container-runtime -f -y sudo cp -f docker/daemon.json /etc/docker sudo systemctl restart docker ``` See [here](https://stackoverflow.com/questions/59691207/docker-build-with-nvidia-runtime) for detail. #### a. Build an Image We provide a [Dockerfile](../../docker/Dockerfile) to build an image. ```shell docker build -f ./docker/Dockerfile --rm -t xrnerf . ``` **Important:** Make sure you've installed the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). #### b. 
Create a Container Create a container with command: ```shell docker run --gpus all -it xrnerf /bin/bash ``` Open a terminal in your host computer, copy project into docker container ```shell # d287273af72e is container id, using 'docker ps -a' to find id docker cp ProjectPath/xrnerf d287273af72e:/workspace ``` ## Verification To verify whether XRNeRF and the required environment are installed correctly, we can run unit-test python codes ```shell coverage run --source xrnerf/models -m pytest -s test/models && coverage report -m ``` Notice that ```coverage``` and ```pytest``` need to be installed before that ``` pip install coverage pytest -i https://pypi.tuna.tsinghua.edu.cn/simple ``` ================================================ FILE: docs/en/tutorials/config.md ================================================ # Tutorial 1: Learn about Configs We use python files as configs, incorporate modular and inheritance design into our config system, which is convenient to conduct various experiments. You can find all the provided configs under `$PROJECT/configs`. - [Tutorial 1: Learn about Configs](#tutorial-1-learn-about-configs) - [Configuration Components](#configuration-components) ## Configuration Components We can logically divide the configuration file into components: * training * model * data The following content explains these configuration components one by one. * training training configurations contain all parameters to control model training, including optimizer, hooks, runner and so on.
```python import os from datetime import datetime method = 'nerf' # which nerf method # optimizer setting optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) optimizer_config = dict(grad_clip=None) max_iters = 20000 # train for how many iters lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) # learning rate and decay checkpoint_config = dict(interval=5000, by_epoch=False) # when to save checkpoint log_level = 'INFO' log_config = dict(interval=5000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 5000), ('val', 1)] # loop: train 5000 iters, validate 1 iter # hooks # 'params' are numeric type value, 'variables' are variables in local environment train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset dict(type='OccupationHook', params=dict()), # no need for open-source vision ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # whether to use ddp work_dir = './work_dirs/nerfsv3/nerf_#DATANAME#_base01/' # where to save ckpt, images, video, logs timestamp = datetime.now().strftime('%d-%b-%H-%M') # to make sure different log-files each train # some shared params by model and data, to avoid define twice dataset_type = 'blender' no_batching = True # only take random rays from 1 image at a time no_ndc = True white_bkgd = True # set to render synthetic data on a white bkgd (always use for dvoxels) is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 4 # how many N_rand in get_item() function lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # number of coarse samples per ray # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') ``` * model define network structure, a network is usually composed of embedder, mlp and render. ```python model = dict( type='NerfNetwork', # network class name cfg=dict( phase='train', # 'train' or 'test' N_importance=128, # number of additional fine samples per ray is_perturb=is_perturb, # see above chunk=1024 * 32, # mainly work for val, to avoid oom bs_data='rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse mlp model type='NerfMLP', # mlp class name skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # to avoid oom output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', # embedder class name i_embed=0, # set 0 for default positional encoding, -1 for none multires=10, # log2 of max freq for positional encoding (3D location) multires_dirs=4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=dict( # fine model type='NerfMLP', skips=[4], netdepth=8, netwidth=256, netchunk=1024 * 32, output_ch=5, use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, multires=10, multires_dirs=4, ), ), render=dict( type='NerfRender', # render cloass name white_bkgd=white_bkgd, # see above raw_noise_std=0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) ``` * data define network structure, a network is usually composed of embedder, mlp and render. 
```python basedata_cfg = dict( dataset_type=dataset_type, datadir='data/nerf_synthetic/#DATANAME#', half_res=True, # load blender synthetic data at 400x400 instead of 800x800 testskip= 8, # will load 1/N images from test/val sets, useful for large datasets like deepvoxels white_bkgd=white_bkgd, is_batching=False, # True for blender, False for llff mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict(type='ToTensor', keys=['pose', 'target_s']), dict(type='GetRays'), dict(type='SelectRays', sel_n=N_rand_per_sampler, precrop_iters=500, precrop_frac=0.5), # in the first 500 iter, select rays inside center of image dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose', 'iter_n']), ] test_pipeline = [ dict(type='ToTensor', keys=['pose']), dict(type='GetRays'), dict(type='FlattenRays'), dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), dict(type='PerturbZvals', enable=False), # do not perturb when test dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose']), ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( 
type='SceneBaseDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ``` ================================================ FILE: docs/en/tutorials/data_pipeline.md ================================================ # Tutorial 2: Customize Data Pipelines In this tutorial, we will introduce some methods about the design of data pipelines, and how to customize and extend your own data pipelines for the project. - [Tutorial 2: Customize Data Pipelines](#tutorial-2-customize-data-pipelines) - [Concept of Data Pipelines](#concept-of-data-pipelines) - [Design of Data Pipelines](#design-of-data-pipelines) ## Concept of Data Pipelines Data Pipeline is a modular form for data process. We make common data processing operations into python class, which named ```pipeline```. The following code block shows how to define a pipeline class to calculate viewdirs from rays' direction. ```python @PIPELINES.register_module() class GetViewdirs: """get viewdirs from rays_d """ def __init__(self, enable=True, **kwargs): self.enable = enable def __call__(self, results): """get viewdirs Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if self.enable: viewdirs = results['rays_d'].clone() viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True) viewdirs = torch.reshape(viewdirs, [-1, 3]).float() results['viewdirs'] = viewdirs return results ``` To use the `GetViewdirs`, we can simply add `dict(type='GetViewdirs')` to `train_pipeline` in config file. ## Design of Data Pipelines We logically divide data process pipeline into 4 python files: * `creat.py` create or calculate new variables. * `augment.py` data augmentation operations. * `transforms.py` convert data type or change coordinate system. * `compose.py` Combine various data processing operations into a pipeline. A complete data pipeline configuration is shown below. 
```python train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict(type='ToTensor', keys=['pose', 'target_s']), dict(type='GetRays'), dict(type='SelectRays', sel_n=N_rand_per_sampler, precrop_iters=500, precrop_frac=0.5), # in the first 500 iter, select rays inside center of image dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose', 'iter_n']), ] ``` In this case, the input data is a dict, created in [_fetch_train_data()](../../../xrnerf/datasets/scene_dataset.py) ```python data = {'poses': self.poses, 'images': self.images, 'i_data': self.i_train, 'idx': idx} ``` In data pipeline, the data processing flow is as follows: * `Sample` select one image or pose via `idx`, create `pose` and `target_s` * `DeleteUseless` delete `'images', 'poses', 'i_data', 'idx'` in dict, they are already useless * `ToTensor` convert `'pose', 'target_s'` in dict * `GetRays` calculate `'rays_d', 'rays_o'` from camera parameter and images shape * `SelectRays` select a batchsize rays * `GetViewdirs` calculate viewdirs from rays' direction * `ToNDC` Coordinate system transformation * `GetBounds` get near and far * `GetZvals` samples points along rays between near point and far point * `PerturbZvals` data augmentation * `GetPts` get points' position ================================================ FILE: docs/en/tutorials/model.md ================================================ # Tutorial 3: Model In this tutorial, we will introduce the design of nerf model, and how data is processed inside model. 
- [Tutorial 3: Model](#tutorial-3-model) - [The Design of Nerf Model](#the-design-of-nerf-model) - [Overview](#overview) - [Embedder](#embedder) - [MLP](#mlp) - [RENDERS](#renders) - [NETWORKS](#networks)

## The Design of Nerf Model

### Overview

In XRNeRF, models are basically categorized as 4 types.

- embedder: converts point-position and view-direction data into embedded data; an embedder can be a pure function or contain trainable parameters.
- mlp: uses the output of the embedder as input, and outputs raw data (the rgb and density value at sampled positions) for the render; usually contains FC layers.
- render: receives the mlp's raw data and outputs the rgb value at a pixel.
- network: the whole nerf model pipeline, usually contains an embedder, mlp and render.

For all models, the input and output is a dict, named `data`. The model uses items in `data`, creates new items and adds them into `data`. Take the [origin nerf](../../../configs/nerf/nerf_blender_base01.py) method as an example: the `data` is supposed to contain `pts` (shape is n_rays, n_pts, 3) and `viewdirs` (shape is n_rays, n_pts, 3).

### Embedder

The embedder usually takes points' position `pts` and rays' view direction `viewdirs` as input, generates the embedded feature `embedded` and adds it to `data`. You can read [origin nerf's embedder](../../../xrnerf/models/embedders/base.py) to have a clear understanding of how the embedder works.

To use [existing embedders](../../../xrnerf/models/embedders/__init__.py) in xrnerf, you can directly choose one and specify it in the config file. To realize your own embedder, read the following introductions.

* Create a `my_embedder.py` file under the [embedders directory](../../../xrnerf/models/embedders/).
* Write a `MyEmbedder` class which inherits from `nn.Module` or `BaseEmbedder`, and define the `forward` method.
* Import your new class in the [init file](../../../xrnerf/models/embedders/__init__.py).
* Modify the config file.
### MLP

The mlp usually takes points' embedded feature `embedded` as input, generates raw data and adds it to `data`. You can read [origin nerf's mlp](../../../xrnerf/models/mlps/nerf_mlp.py) to have a clear understanding of how the mlp works.

To use [existing mlps](../../../xrnerf/models/mlps/__init__.py) in xrnerf, you can directly choose one and specify it in the config file. To realize your own mlp, the steps are similar to the embedder's.

### RENDERS

The render usually takes points' raw data as input and generates rgb values at each pixel (or ray). You can read [origin nerf's render](../../../xrnerf/models/renders/nerf_render.py) to have a clear understanding of how the render works.

To use [existing renders](../../../xrnerf/models/renders/__init__.py) in xrnerf, you can directly choose one and specify it in the config file. To realize your own render, the steps are similar to the embedder's.

### NETWORKS

The network contains the defined embedder, mlp and render, and it interacts with the mmcv training pipeline during training. A network needs to implement two abstract methods: `train_step` and `val_step`.

[Here](../get_started.md) is a detailed example about how to define a network.
================================================ FILE: docs/zh_cn/apis.md ================================================ # APIS ## run_nerf input: args, 运行python文件时的命令行参数 purpose: 解析命令行参数,并根据参数训练/测试/渲染一个nerf模型 ## train_nerf input: cfg, mmcv.Config purpose: args, 运行python文件时的命令行参数, 训练一个nerf模型 ## test_nerf input: cfg, mmcv.Config purpose: args, 运行python文件时的命令行参数, 测试/渲染一个nerf模型 ## parse_args input: args, 运行python文件时的命令行参数 purpose: 解析命令行参数 ================================================ FILE: docs/zh_cn/dataset_preparation.md ================================================ # 数据准备 本文介绍了如何准备XRNeRF所需数据集 - [数据准备](#数据准备) - [数据集存放结构](#数据集存放结构) - [数据集下载](#数据集下载) #### 数据集存放结构 我们推荐把数据集放在`项目目录/data`下面,否则可能需要修改config中的内容 ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... ``` #### 数据集下载 1. 从[这里](https://drive.google.com/drive/folders/128yBriW1IG_3NJ5Rp7APSTZsJqdJdfc1)下载 ```nerf_synthetic``` 和 ```nerf_llff_data``` , 并放在 ```xrnerf/data``` 里面 2. 下载[NSVF数据集](https://github.com/facebookresearch/NSVF), 具体请阅读[详细介绍](https://github.com/creiser/kilonerf#download-nsvf-datasets) 3. 对于mip-nerf方法的训练,需要生成需要的多尺度数据集,可通过命令生成 ```python tools/convert_blender_data.py --blenderdir /data/nerf_synthetic --outdir data/multiscale``` 4. 对于NeuralBody方法的训练, 请从[这里](https://github.com/zju3dv/neuralbody/blob/master/INSTALL.md#zju-mocap-dataset)下载数据集 5. 对于Animatable方法的训练, 请从[这里](https://github.com/zju3dv/animatable_nerf/blob/master/INSTALL.md#human36m-dataset)下载数据集 6. 对于GNR方法的训练, 请从[这里](https://generalizable-neural-performer.github.io/genebody.html)下载数据集 7. 
对于BungeeNeRF方法的训练, 请从[这里](https://drive.google.com/drive/folders/1ybq-BuRH0EEpcp5OZT9xEMi-Px1pdx4D?usp=sharing)下载数据集 ================================================ FILE: docs/zh_cn/get_started.md ================================================ # 快速开始 本文档提供 XRNeRF 相关用法的基本教程。对于安装说明,请参阅 [安装指南](installation.md)。 - [快速开始](#快速开始) - [数据集](#数据集) - [创建模型](#创建模型) - [基本概念](#基本概念) - [自定义一个新模型](#自定义一个新模型) - [训练](#训练) - [迭代次数控制](#迭代次数控制) - [训练命令](#训练命令) - [测试](#测试) - [详细教程](#详细教程) ## 数据集 我们推荐把数据集放在`项目目录/data`下面,否则可能需要修改config中的内容 ``` xrnerf ├── xrnerf ├── docs ├── configs ├── test ├── extensions ├── data │ ├── nerf_llff_data │ ├── nerf_synthetic │ ├── multiscale │ ├── multiscale_google │ ├── ... ``` 请参阅 [数据集准备](dataset_preparation.md) 获取数据集准备的相关信息。 ## 创建模型 ### 基本概念 在XRNeRF中,模型被分为4个部分 - embedder: 输入点的位置和视角,输出embedded特征数据,embedder可能是纯函数型的,或者带有可学习参数的 - mlp: 使用embedder的输出作为输入,输出原始的点数据(采样点的rgb值和密度值)送给render, 一般由多层感知机组成 - render: 获取mlp的输出数据,沿着射线上的点进行积分等操作,输出图像上一个像素点的rgb值 - network: 将以上三个部分组织起来,同时也是与mmcv的runner进行交互的部分,控制了训练时的loss计算和验证时的指标计算 对于上述所有模型而言,输入都是一个字典类型的`data`。模型使用字典`data`中的内容来创建新的键值对,并加入`data`。以[origin nerf](../../configs/nerf/nerf_blender_base01.py)为例,最开始的`data`应该包含`pts`(尺寸为 n_rays, n_pts, 3) and `viewdirs`(尺寸为 n_rays, n_pts, 3). ### 自定义一个新模型 如果要自定义一个network,需要继承`BaseNerfNetwork`,其中定义了两个抽象方法 - `train_step()`: training 模式下的推理和计算loss的函数. - `val_step()`: testing 模式下的推理函数. [NerfNetwork](../../xrnerf/models/networks/nerf.py) 是一个很好的例子 具体而言,如果想要实现一个具有新feature的nerf方法,有以下几步需要做 1. 创建一个新文件如 `xrnerf/models/networks/my_networks.py`. ```python from ..builder import NETWORKS from .nerf import NerfNetwork @NETWORKS.register_module() class MyNerfNetwork(NerfNetwork): def __init__(self, cfg, mlp=None, mlp_fine=None, render=None): super().__init__(cfg, mlp, mlp_fine, render) def forward(self, data): .... def train_step(self, data, optimizer, **kwargs): .... def val_step(self, data, optimizer=None, **kwargs): .... ``` 2. 
修改 `xrnerf/models/networks/__init__.py` 文件 ```python from .my_networks import MyNerfNetwork ``` 3. 修改配置文件[config file](../../configs/nerf/nerf_blender_base01.py) 原来 ```python model = dict( type='NerfNetwork', .... ``` 现在 ```python model = dict( type='MyNerfNetwork', .... ``` 同样的,要实现embedder/mlp/render的新功能,步骤与上述类似 * 要定义一个新的embedder, 需要继承`nn.Module` 或者 `BaseEmbedder`, 并定义 `forward` 方法. [BaseEmbedder](../../xrnerf/models/embedders/base.py) 是个很好的例子 * 要定义一个新的mlp, 需要继承 `nn.Module` 或者 `BaseMLP`, 并定义 `forward` 方法. [NerfMLP](../../xrnerf/models/mlps/nerf_mlp.py) 可供参考 * 要定义一个新的render, 需要继承 `nn.Module` 或者 `BaseRender`, 并定义 `forward` 方法. [NerfRender](../../xrnerf/models/renders/nerf_render.py) 可供参考 ## 训练 ### 迭代次数控制 XRnerf 使用 `mmcv.runner.IterBasedRunner` 来控制训练, 并用 `mmcv.runner.EpochBasedRunner` 来测试. 训练时, 配置文件的 `max_iters` 表示最多训练多少次. 测试时, `max_iters` 被强制改为1, 表示进行一次完整的epoch. ### 训练命令 ```shell python run_nerf.py --config configs/nerf/nerf_blender_local01.py --dataname lego ``` 参数为: - `--config`: 配置文件位置 - `--dataname`: 使用数据集下的哪个数据来训练 ### 测试 ```shell python run_nerf.py --config configs/nerf/nerf_blender_local01.py --dataname lego --test_only --load_from iter_50000.pth ``` 参数为: - `--config`: 配置文件位置 - `--dataname`: 使用数据集下的哪个数据 - `--test_only`: 切换为测试模式 - `--load_from`: 重载覆盖掉原来配置文件里的 `load_from`, 在某些情况下为了方便而使用 ## 详细教程 目前, XRNeRF 提供以下几种更详细的教程 * [如何编写配置文件](tutorials/config.md) * [数据处理流程](tutorials/data_pipeline.md) * [模型定义](tutorials/model.md) 除此以外,文档还包括以下内容 * [api介绍](api.md) * [数据集准备](dataset_preparation.md) * [安装](installation.md) ================================================ FILE: docs/zh_cn/installation.md ================================================ # 安装 本文档提供了安装 XRNeRF 的相关步骤。 - [安装](#安装) - [安装依赖包](#安装依赖包) - [准备环境](#准备环境) - [a. 安装系统依赖库.](#a-安装系统依赖库) - [b. 创建并激活 conda 虚拟环境.](#b-创建并激活-conda-虚拟环境) - [c. 安装 PyTorch 和 torchvision](#c-安装-pytorch-和-torchvision) - [d. 安装其他python包](#d-安装其他python包) - [e. 安装cuda扩展](#e-安装cuda扩展) - [d. 
下载smpl_t_pose支持GNR](#d-下载smpl_t_pose支持gnr) - [利用 Docker 镜像安装 XRNeRF](#利用-docker-镜像安装-xrnerf) - [a. 创建docker镜像](#a-创建docker镜像) - [b. 运行docker容器](#b-运行docker容器) - [安装验证](#安装验证) ## 安装依赖包 - Linux - Python 3.7+ - **PyTorch 1.10+ (低版本可能无法支持)** - **CUDA 11.0+ (低版本可能无法支持)** - GCC 7.5+ - build-essential: Install by `apt-get install -y build-essential git ninja-build ffmpeg libsm6 libxext6 libgl1` - [mmcv-full](https://github.com/open-mmlab/mmcv) - Numpy - ffmpeg - [opencv-python 3+](https://github.com/dmlc/decord): 可通过 `pip install opencv-python>=3` 安装 - [imageio](https://github.com/dmlc/decord): 可通过 `pip install imageio` 安装 - [scikit-image](https://github.com/dmlc/decord): 可通过 `pip install scikit-image` 安装 - [lpips](https://github.com/richzhang/PerceptualSimilarity): 可通过 `pip install lpips` 安装 - [trimesh](https://github.com/mikedh/trimesh): 可通过 `pip install trimesh` 安装 - [smplx](https://github.com/vchoutas/smplx): 可通过 `pip install smplx` 安装 - [spconv](https://github.com/dmlc/decord): 从支持的版本中选择跟你本地cuda版本一致的安装, 比如 `pip install spconv-cu113` - [pytorch3d](https://github.com/dmlc/decord): 可通过 `pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"` 安装 关于硬件依赖: Instant-NGP需要GPU架构>=75, 也就是说至少需要RTX 20X0及以上的显卡,才能获得xrnerf的完整支持。 | RTX 30X0 | A100 | RTX 20X0 | TITAN V / V100 | GTX 10X0 / TITAN Xp | GTX 9X0 | K80 | |:--------:|:----:|:--------:|:--------------:|:-------------------:|:-------:|:---:| | 86 | 80 | 75 | 70 | 61 | 52 | 37 | 如果不需要运行Instant-NGP, [spconv](https://github.com/traveller59/spconv#spconv-spatially-sparse-convolution-library) 决定了最低的cuda版本依赖. 根据他们的表格可见,cuda10.2 是最低要求。 ## 准备环境 #### a. 安装系统依赖库. ```shell sudo apt install libgl-dev freeglut3-dev build-essential git ninja-build ffmpeg libsm6 libxext6 libgl1 ``` #### b. 创建并激活 conda 虚拟环境. ```shell conda create -n xrnerf python=3.7 -y conda activate xrnerf ``` #### c. 安装 PyTorch 和 torchvision 1. 
查看pytorch-cuda版本匹配表,选择合适的版本 [here](https://pytorch.org/get-started/previous-versions/) or [here](https://blog.csdn.net/weixin_42069606/article/details/105198845) 2. 从[这里](https://download.pytorch.org/whl/torch_stable.html)下载合适版本的pytorch (>=1.10.0 且需要与你的cuda版本匹配), 比如 ```cu111/torch-1.10.0%2Bcu111-cp37-cp37m-linux_x86_64.whl```, 下载这个whl文件 3. 安装这个whl文件, 比如 ```pip install torch-1.10.0+cu111-cp37-cp37m-linux_x86_64.whl``` 4. 在[这里](https://pypi.org/project/torchvision/)查看版本匹配信息, 并安装正确版本的torchvision, 比如 ```pip install torchvision==0.12.0``` #### d. 安装其他python包 * 您可以使用 ```pip install requirements.txt``` 来安装大部分需要的 pkgs。 如果此步骤成功,您应该跳转到 ```kilo-cuda``` 和 ```spconv``` 步骤手动安装它们。 或者您可以跳过此步骤并按照以下安装步骤进行操作 * ```pip install 'opencv-python>=3' yapf imageio scikit-image lpips trimesh smplx``` * 根据[官方说明](https://mmcv.readthedocs.io/en/latest/get_started/installation.html),安装 ```mmcv-full``` * 安装 ```spconv```, 比如 ```pip install spconv-cu111```. 值得注意的是只有部分cuda版本是支持的, 具体请查看 [官方说明](https://github.com/traveller59/spconv) * 通过 ```pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"``` 安装 ```pytorch3d``` * 查看[官方说明](https://github.com/creiser/kilonerf#option-b-build-cuda-extension-yourself) 安装 ```kilo-cuda``` (非必须,运行kilo-nerf方法需要) * 通过```pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch``` 安装 ```tcnn```, 如果网络问题无法下载cutlass等,参考如下命令 ```shell git clone https://gitclone.com/github.com/nvlabs/tiny-cuda-nn cd tiny-cuda-nn/dependencies git clone https://gitclone.com/github.com/fmtlib/fmt.git git clone https://gitclone.com/github.com/NVIDIA/cutlass.git cd ../bindings/torch && python setup.py install ``` (非必须,运行instant-ngp方法需要) #### e. 安装cuda扩展 * 为了支持instant-ngp算法,需要编译安装cuda扩展 ```raymarch```, 查看[具体教程](../../extensions/ngp_raymarch/README.md) * 为了支持gnr算法,需要编译安装cuda扩展 ```mesh_grid```, 查看[具体教程](../../extensions/mesh_grid/README.md) #### d. 
下载smpl_t_pose支持GNR * 为了支持gnr算法,需要从[GNR](https://github.com/generalizable-neural-performer/gnr)下载```smpl_t_pose```文件夹,并修改```configs/gnr/gnr_genebody.py```中的```basedata_cfg.t_pose_path```为对应的存放位置 ## 利用 Docker 镜像安装 XRNeRF 我们根据国内的网络环境优化了dockerfile,请使用[DockerfileCN](../../docker/DockerfileCN) 在安装前需要修改docker的daemon配置,从而让docker的build过程支持gpu (为了编译cuda扩展): ```shell sudo apt-get install nvidia-container-runtime -f -y sudo cp -f docker/daemon.json /etc/docker sudo systemctl restart docker ``` [这里](https://stackoverflow.com/questions/59691207/docker-build-with-nvidia-runtime)有更详细的解释. #### a. 创建docker镜像 XRNeRF 提供一个 [DockerfileCN](../../docker/DockerfileCN) 可以直接创建 docker 镜像 ```shell docker build -f ./docker/DockerfileCN --rm -t xrnerf . ``` **注意** 用户需要确保已经安装了 [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker)。 #### b. 运行docker容器 运行以下命令,创建容器: ```shell docker run --gpus all -it xrnerf /bin/bash ``` 在本机上(非docker镜像机内)开启一个终端,将项目文件(包括数据集)复制进docker镜像机 ```shell # d287273af72e 是镜像的id, usin通过 'docker ps -a' 确定id docker cp ProjectPath/xrnerf d287273af72e:/workspace ``` ## 安装验证 为了验证 XRNeRF 和所需的依赖包是否已经安装成功,可以运行单元测试模块 ```shell coverage run --source xrnerf/models -m pytest -s test/models && coverage report -m ``` 注意,运行单元测试模块前需要额外安装 ```coverage``` 和 ```pytest``` ``` pip install coverage pytest -i https://pypi.tuna.tsinghua.edu.cn/simple ``` ================================================ FILE: docs/zh_cn/tutorials/config.md ================================================ # 教程 1: 如何编写配置文件 XRNeRF 使用 python 文件作为配置文件。其配置文件系统的设计将模块化与继承整合进来,方便用户进行各种实验。 XRNeRF 提供的所有配置文件都放置在 `$PROJECT/configs` 文件夹下。 - [教程 1: 如何编写配置文件](#教程-1-如何编写配置文件) - [配置文件组成部分](#配置文件组成部分) ## 配置文件组成部分 配置文件的内容在逻辑上可以分为3个部分: * 训练 * 模型 * 数据 下面的内容将会逐部分介绍配置文件 * 训练 训练配置部分包含了控制训练过程的各类参数,包括optimizer, hooks, runner等等 ```python import os from datetime import datetime method = 'nerf' # nerf方法 # optimizer 参数 optimizer = dict(type='Adam', lr=5e-4, betas=(0.9, 0.999)) 
optimizer_config = dict(grad_clip=None) max_iters = 20000 # 训练多少个iter lr_config = dict(policy='step', step=500 * 1000, gamma=0.1, by_epoch=False) # 学习率和衰减 checkpoint_config = dict(interval=5000, by_epoch=False) # 保存checkpoint的间隔 log_level = 'INFO' log_config = dict(interval=5000, by_epoch=False, hooks=[dict(type='TextLoggerHook')]) workflow = [('train', 5000), ('val', 1)] # 循环: 每训练 5000 iters, 验证 1 iter # hooks # 'params' 是数值型参数, 'variables' 是代码运行上下面出现的变量 train_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='valset')), dict(type='ValidateHook', params=dict(save_folder='visualizations/validation')), dict(type='SaveSpiralHook', params=dict(save_folder='visualizations/spiral')), dict(type='PassIterHook', params=dict()), # 将当前iter数告诉dataset ] test_hooks = [ dict(type='SetValPipelineHook', params=dict(), variables=dict(valset='testset')), dict(type='TestHook', params=dict()), ] # runner train_runner = dict(type='NerfTrainRunner') test_runner = dict(type='NerfTestRunner') # runtime settings num_gpus = 1 distributed = (num_gpus > 1) # 是否使用 ddp work_dir = './work_dirs/nerfsv3/nerf_#DATANAME#_base01/' # 保存运行时产生文件的位置 timestamp = datetime.now().strftime('%d-%b-%H-%M') # 保证每次的workspace都不同 # some shared params by model and data, to avoid define twice dataset_type = 'blender' no_batching = True # 每次选择1张图片来抽取射线 no_ndc = True white_bkgd = True # 渲染时背景设定为全白 is_perturb = True # set to 0. for no jitter, 1. 
for jitter use_viewdirs = True # use full 5D input instead of 3D N_rand_per_sampler = 1024 * 4 # 在取多少根射线 在 get_item() 函数中使用 lindisp = False # sampling linearly in disparity rather than depth N_samples = 64 # 在coarse模型中输入多少根射线 # resume_from = os.path.join(work_dir, 'latest.pth') # load_from = os.path.join(work_dir, 'latest.pth') ``` * 模型 模型部分的配置信息,定义了网络模型结构,一个network通常由embedder, mlp 和 render组成。 ```python model = dict( type='NerfNetwork', # network 类名字 cfg=dict( phase='train', # 'train' or 'test' N_importance=128, # number of additional fine samples per ray is_perturb=is_perturb, # see above chunk=1024 * 32, # mainly work for val, to avoid oom bs_data='rays_o', # the data's shape indicates the real batch-size, this's also the num of rays ), mlp=dict( # coarse mlp model type='NerfMLP', # mlp class name skips=[4], netdepth=8, # layers in network netwidth=256, # channels per layer netchunk=1024 * 32, # to avoid oom output_ch=5, # 5 if cfg.N_importance>0 else 4 use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', # embedder class name i_embed=0, # set 0 for default positional encoding, -1 for none multires=10, # log2 of max freq for positional encoding (3D location) multires_dirs=4, # this is 'multires_views' in origin codes, log2 of max freq for positional encoding (2D direction) ), ), mlp_fine=dict( # fine model type='NerfMLP', skips=[4], netdepth=8, netwidth=256, netchunk=1024 * 32, output_ch=5, use_viewdirs=use_viewdirs, embedder=dict( type='BaseEmbedder', i_embed=0, multires=10, multires_dirs=4, ), ), render=dict( type='NerfRender', # render cloass name white_bkgd=white_bkgd, # see above raw_noise_std=0, # std dev of noise added to regularize sigma_a output, 1e0 recommended ), ) ``` * 数据 数据部分的配置信息,定义了数据集类型,数据的处理流程,batchsize等等信息。 ```python basedata_cfg = dict( dataset_type=dataset_type, datadir='data/nerf_synthetic/#DATANAME#', half_res=True, # load blender synthetic data at 400x400 instead of 800x800 testskip= 8, # will load 1/N images from test/val sets, 
useful for large datasets like deepvoxels white_bkgd=white_bkgd, is_batching=False, # True for blender, False for llff mode='train', ) traindata_cfg = basedata_cfg.copy() valdata_cfg = basedata_cfg.copy() testdata_cfg = basedata_cfg.copy() traindata_cfg.update(dict()) valdata_cfg.update(dict(mode='val')) testdata_cfg.update(dict(mode='test', testskip=0)) train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict(type='ToTensor', keys=['pose', 'target_s']), dict(type='GetRays'), dict(type='SelectRays', sel_n=N_rand_per_sampler, precrop_iters=500, precrop_frac=0.5), # in the first 500 iter, select rays inside center of image dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose', 'iter_n']), ] test_pipeline = [ dict(type='ToTensor', keys=['pose']), dict(type='GetRays'), dict(type='FlattenRays'), dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), dict(type='PerturbZvals', enable=False), # do not perturb when test dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose']), ] data = dict( train_loader=dict(batch_size=1, num_workers=4), train=dict( type='SceneBaseDataset', cfg=traindata_cfg, pipeline=train_pipeline, ), val_loader=dict(batch_size=1, num_workers=0), val=dict( type='SceneBaseDataset', cfg=valdata_cfg, pipeline=test_pipeline, ), test_loader=dict(batch_size=1, num_workers=0), test=dict( type='SceneBaseDataset', cfg=testdata_cfg, pipeline=test_pipeline, # same pipeline as validation ), ) ``` ================================================ FILE: docs/zh_cn/tutorials/data_pipeline.md 
================================================ # 教程 2: 如何设计数据处理流程 在本教程中,我们将介绍一些有关数据前处理流水线设计的方法,以及如何为项目自定义和扩展自己的数据流水线。 - [教程 2: 如何设计数据处理流程](#教程-2-如何设计数据处理流程) - [数据处理流程的基本概念](#数据处理流程的基本概念) - [设计数据处理流程](#设计数据处理流程) ## 数据处理流程的基本概念 数据处理流程是用于数据处理的模块。我们把常见的nerf方法数据处理操作抽象化为一个个python类,即```pipeline```。 下面的代码块展示了如何定义一个数据处理流程类来从rays' direction计算viewdirs ```python @PIPELINES.register_module() class GetViewdirs: """get viewdirs from rays_d """ def __init__(self, enable=True, **kwargs): self.enable = enable def __call__(self, results): """get viewdirs Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ if self.enable: viewdirs = results['rays_d'].clone() viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True) viewdirs = torch.reshape(viewdirs, [-1, 3]).float() results['viewdirs'] = viewdirs return results ``` 我们可以直接在配置文件中,把`dict(type='GetViewdirs')`添加到`train_pipeline`中去来使用`GetViewdirs`。 ## 设计数据处理流程 我们根据处理逻辑把数据处理流程划分为了4个python文件: * `creat.py` 创建和计算新变量 * `augment.py` 数据增强操作 * `transforms.py` 修改数据格式或者变换坐标系 * `compose.py` 组合各种流程在一起. 
下面展示了一个完整的数据处理流程配置 ```python train_pipeline = [ dict(type='Sample'), dict(type='DeleteUseless', keys=['images', 'poses', 'i_data', 'idx']), dict(type='ToTensor', keys=['pose', 'target_s']), dict(type='GetRays'), dict(type='SelectRays', sel_n=N_rand_per_sampler, precrop_iters=500, precrop_frac=0.5), # in the first 500 iter, select rays inside center of image dict(type='GetViewdirs', enable=use_viewdirs), dict(type='ToNDC', enable=(not no_ndc)), dict(type='GetBounds'), dict(type='GetZvals', lindisp=lindisp, N_samples=N_samples), # N_samples: number of coarse samples per ray dict(type='PerturbZvals', enable=is_perturb), dict(type='GetPts'), dict(type='DeleteUseless', keys=['pose', 'iter_n']), ] ``` 在上面的例子中,输入数据是一个字典,在[_fetch_train_data()](../../../xrnerf/datasets/scene_dataset.py)中创建 ```python data = {'poses': self.poses, 'images': self.images, 'i_data': self.i_train, 'idx': idx} ``` 在上面的数据处理流程中,分别做了以下事: * `Sample` 选择一张图和对应的pose,创建 `pose` 和 `target_s` * `DeleteUseless` 删除字典中的 `'images', 'poses', 'i_data', 'idx'`, 这些变量后面已经不会再被用到了 * `ToTensor` 把 `'pose', 'target_s'` 变成tensor * `GetRays` 从摄像机参数中计算calculate `'rays_d', 'rays_o'` * `SelectRays` 选择一个batch的射线 * `GetViewdirs` 从rays' direction计算viewdirs * `ToNDC` 进行坐标系转换 * `GetBounds` 获取射线上采样区间的最远和最近距离 * `GetZvals` 在射线上采样区间采点 * `PerturbZvals` 数据增强 * `GetPts` 获取点的坐标 ================================================ FILE: docs/zh_cn/tutorials/model.md ================================================ # 教程 3: 模型 在这个教程中,将介绍XRNeRF中模型的设计,以及数据在模型中数如何依次被处理的 - [教程 3: 模型](#教程-3-模型) - [XRNeRF中模型的设计](#xrnerf中模型的设计) - [概述](#概述) - [Embedder](#embedder) - [MLP](#mlp) - [RENDERS](#renders) - [NETWORKS](#networks) ## XRNeRF中模型的设计 ### 概述 在XRNeRF中,模型被分为4个部分 - embedder: 输入点的位置和视角,输出embedded特征数据,embedder可能是纯函数型的,或者带有可学习参数的 - mlp: 使用embedder的输出作为输入,输出原始的点数据(采样点的rgb值和密度值)送给render, 一般由多层感知机组成 - render: 获取mlp的输出数据,沿着射线上的点进行积分等操作,输出图像上一个像素点的rgb值 - network: 将以上三个部分组织起来,同时也是与mmcv的runner进行交互的部分,控制了训练时的loss计算和验证时的指标计算 
对于上述所有模型而言,输入都是一个字典类型的`data`。模型使用字典`data`中的内容来创建新的键值对,并加入`data`。以[origin nerf](../../../configs/nerf/nerf_blender_base01.py)为例,最开始的`data`应该包含`pts`(尺寸为 n_rays, n_pts, 3) 和 `viewdirs`(尺寸为 n_rays, n_pts, 3).

### Embedder

Embedder的输入是点坐标`pts`和射线的角度`viewdirs`,输出嵌入后的特征数据 `embedded` 并加入`data`中去。可以阅读[origin nerf's embedder](../../../xrnerf/models/embedders/base.py) 来加深对这一过程的理解。

如果要使用XRNeRF中[已经存在的embedder](../../../xrnerf/models/embedders/__init__.py),可以直接选择一种,然后修改配置文件即可。而如果要实现自己的embedder,可以按照下面的指引

* 在[embedders](../../../xrnerf/models/embedders/)目录下创建一个 `my_embedder.py` 文件
* 在文件中实现一个 `MyEmbedder` 类,继承自`nn.Module` 或者 `BaseEmbedder`,并且定义 `forward` 方法.
* 修改[init](../../../xrnerf/models/embedders/__init__.py)文件
* 修改配置文件

### MLP

mlp通常接收采样点的embedded feature `embedded`作为输入,产生raw data 并加入 `data`. 可以阅读[origin nerf's mlp](../../../xrnerf/models/mlps/nerf_mlp.py) 来加深对这一过程的理解。

如果要使用XRNeRF中[已经存在的mlp](../../../xrnerf/models/mlps/__init__.py),可以直接选择一种,然后修改配置文件即可。而如果要实现自己的mlp,操作步骤与上述过程类似

### RENDERS

render通常接收采样点的raw data作为输入,输出图像上像素点的rgb值并加入 `data`. 可以阅读[origin nerf's render](../../../xrnerf/models/renders/nerf_render.py) 来加深对这一过程的理解。

如果要使用XRNeRF中[已经存在的render](../../../xrnerf/models/renders/nerf_render.py),可以直接选择一种,然后修改配置文件即可。而如果要实现自己的render,操作步骤与上述过程类似

### NETWORKS

一个network包括embedder, mlp 和 render,network会负责跟mmcv的训练流程交互。对一个network而言,需要实现以下方法:`train_step` 和 `val_step`.
[这里](../get_started.md) 是如何定义network的例子。 ================================================ FILE: extensions/mesh_grid/README.md ================================================ # gnr_mesh_grid ## Install build and install mesh_grid,to support gnr ``` cd extensions/mesh_grid rm -rf build && clear && python setup.py install ``` ================================================ FILE: extensions/mesh_grid/__init__.py ================================================ from .mesh_grid_searcher import MeshGridSearcher ================================================ FILE: extensions/mesh_grid/matrix.h ================================================ #ifndef _MATRIX_H_ #define _MATRIX_H_ #ifndef __device__ #define __device__ #endif #ifndef __host__ #define __host__ #endif #ifndef ABS #define ABS(A) ((A) < 0 ? -(A) : (A)) #endif template __device__ __host__ bool solve3(scalar A[9], scalar b[3], scalar eps = 1e-6) { unsigned char pivot = 0, rank = 3, permute[3] = {0,1,2}; bool valid = true; scalar t = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])<= eps) { t = A[0]; A[0] = A[6]; A[6] = t; t = A[1]; A[1] = A[7]; A[7] = t; t = A[2]; A[2] = A[8]; A[8] = t; permute[--rank] = 0; pivot = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])<= eps) { t = A[0]; A[0] = A[3]; A[3] = t; t = A[1]; A[1] = A[4]; A[4] = t; t = A[2]; A[2] = A[5]; A[5] = t; permute[--rank] = 0; pivot = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])<= eps) permute[--rank] = 0; } } if(rank > 0) { if(pivot == 1) { A[0] /= A[1]; t = A[4]; A[4] = A[3] - A[0]*t; A[3] = t; t = A[7]; A[7] = A[6] - A[0]*t; A[6] = t; t = b[1]; b[1] = b[0] - A[0]*t; b[0] = t; A[0] = A[1]; pivot = 0; } else { A[1] /= A[pivot]; A[4] = A[4] - A[1]*A[pivot+3]; A[7] = A[7] - A[1]*A[pivot+6]; b[1] = b[1] - A[1]*b[pivot]; } if(pivot == 2) { A[0] /= A[2]; t = A[5]; A[5] = A[3] - A[0]*t; A[3] = t; t = A[8]; 
A[8] = A[6] - A[0]*t; A[6] = t; t = b[2]; b[2] = b[0] - A[0]*t; b[0] = t; A[0] = A[2]; } else { A[2] /= A[pivot]; A[5] = A[5] - A[2]*A[pivot+3]; A[8] = A[8] - A[2]*A[pivot+6]; b[2] = b[2] - A[2]*b[pivot]; } if(rank > 1) { pivot = (ABS(A[4]) < ABS(A[5]) ? 2 : 1); if(ABS(A[pivot]) <= eps) { if(rank > 2) { t = A[3]; A[3] = A[6]; A[6] = t; t = A[4]; A[4] = A[7]; A[7] = t; t = A[5]; A[5] = A[8]; A[8] = t; permute[--rank] = 1; pivot = (ABS(A[4]) < ABS(A[5]) ? 2 : 1); if(ABS(A[pivot]) <= eps) permute[--rank] = 1; } else permute[--rank] = 1; } } if(rank > 1) { if(pivot == 2) { A[4] /= A[5]; t = A[8]; A[8] = A[7] - A[4]*t; A[7] = t; t = b[2]; b[2] = b[1] - A[4]*t; b[1] = t; A[4] = A[5]; } else { A[5] /= A[4]; A[8] = A[8] - A[5]*A[7]; b[2] = b[2] - A[5]*b[1]; } if(rank >= 3 && ABS(A[8]) <= eps) permute[--rank] = 2; } } if(rank >= 3) { b[2] = b[2] / A[8]; } else if(ABS(b[2]) > eps) { valid = false; } if(rank >= 2) { b[1] = (b[1] - A[7]*b[2]) / A[4]; } else if(ABS(b[1]) > eps) { valid = false; } if(rank >= 1) { b[0] = (b[0] - A[6]*b[2] - A[3]*b[1]) / A[0]; } else if(ABS(b[0]) > eps) { valid = false; } if(rank <= 1 && permute[1] != 1) { t = b[1]; b[1] = b[permute[1]]; b[permute[1]] = t; } if(rank <= 2 && permute[2] != 2) { t = b[2]; b[2] = b[permute[2]]; b[permute[2]] = t; } return valid; } template __device__ __host__ bool solve4(scalar A[16], scalar b[4], scalar eps = 1e-6) { unsigned char pivot = 0, rank = 4, permute[4] = {0,1,2,3}; bool valid = true; scalar t = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])< ABS(A[3])) pivot = 3; if(ABS(A[pivot])<= eps) { t = A[0]; A[0] = A[12]; A[12] = t; t = A[1]; A[1] = A[13]; A[13] = t; t = A[2]; A[2] = A[14]; A[14] = t; t = A[3]; A[3] = A[15]; A[15] = t; permute[--rank] = 0; pivot = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])< ABS(A[3])) pivot = 3; if(ABS(A[pivot])<= eps) { t = A[0]; A[0] = A[8]; A[8] = t; t = A[1]; A[1] = A[9]; A[9] 
= t; t = A[2]; A[2] = A[10];A[10]= t; t = A[3]; A[3] = A[11];A[11]= t; permute[--rank] = 0; pivot = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])< ABS(A[3])) pivot = 3; if(ABS(A[pivot])<= eps) { t = A[0]; A[0] = A[4]; A[4] = t; t = A[1]; A[1] = A[5]; A[5] = t; t = A[2]; A[2] = A[6]; A[6] = t; t = A[3]; A[3] = A[7]; A[7] = t; permute[--rank] = 0; pivot = 0; if(ABS(A[0]) < ABS(A[1])) pivot = 1; if(ABS(A[pivot])< ABS(A[2])) pivot = 2; if(ABS(A[pivot])< ABS(A[3])) pivot = 3; if(ABS(A[pivot])<= eps) permute[--rank] = 0; } } } if(rank > 0) { if(pivot == 1) { A[0] /= A[1]; t = A[5]; A[5] = A[4] - A[0]*t; A[4] = t; t = A[9]; A[9] = A[8] - A[0]*t; A[8] = t; t = A[13];A[13]= A[12]- A[0]*t; A[12]= t; t = b[1]; b[1] = b[0] - A[0]*t; b[0] = t; A[0] = A[1]; pivot = 0; } else { A[1] /= A[pivot]; A[5] = A[5] - A[1]*A[pivot+4]; A[9] = A[9] - A[1]*A[pivot+8]; A[13]= A[13]- A[1]*A[pivot+12]; b[1] = b[1] - A[1]*b[pivot]; } if(pivot == 2) { A[0] /= A[2]; t = A[6]; A[6] = A[4] - A[0]*t; A[4] = t; t = A[10];A[10]= A[8] - A[0]*t; A[8] = t; t = A[14];A[14]= A[12]- A[0]*t; A[12]= t; t = b[2]; b[2] = b[0] - A[0]*t; b[0] = t; A[0] = A[2]; pivot = 0; } else { A[2] /= A[pivot]; A[6] = A[6] - A[2]*A[pivot+4]; A[10]= A[10]- A[2]*A[pivot+8]; A[14]= A[14]- A[2]*A[pivot+12]; b[2] = b[2] - A[2]*b[pivot]; } if(pivot == 3) { A[0] /= A[3]; t = A[7]; A[7] = A[4] - A[0]*t; A[4] = t; t = A[11];A[11]= A[8] - A[0]*t; A[8] = t; t = A[15];A[15]= A[12]- A[0]*t; A[12]= t; t = b[3]; b[3] = b[0] - A[0]*t; b[0] = t; A[0] = A[3]; } else { A[3] /= A[pivot]; A[7] = A[7] - A[3]*A[pivot+4]; A[11]= A[11]- A[3]*A[pivot+8]; A[15]= A[15]- A[3]*A[pivot+12]; b[3] = b[3] - A[3]*b[pivot]; } } if(rank > 1) { pivot = 1; if(ABS(A[5]) < ABS(A[6])) pivot = 2; if(ABS(A[pivot+4])< ABS(A[7])) pivot = 3; if(ABS(A[pivot+4]) <= eps) { if(rank > 2) { t = A[4]; A[4] = A[rank*4-4]; A[rank*4-4] = t; t = A[5]; A[5] = A[rank*4-3]; A[rank*4-3] = t; t = A[6]; A[6] = A[rank*4-2]; A[rank*4-2] = t; 
t = A[7]; A[7] = A[rank*4-1]; A[rank*4-1] = t; permute[--rank] = 1; pivot = 1; if(ABS(A[5]) < ABS(A[6])) pivot = 2; if(ABS(A[pivot+4])< ABS(A[7])) pivot = 3; if(ABS(A[pivot+4])<= eps) { if(rank > 2) { t = A[4]; A[4] = A[rank*4-4]; A[rank*4-4] = t; t = A[5]; A[5] = A[rank*4-3]; A[rank*4-3] = t; t = A[6]; A[6] = A[rank*4-2]; A[rank*4-2] = t; t = A[7]; A[7] = A[rank*4-1]; A[rank*4-1] = t; permute[--rank] = 1; pivot = 1; if(ABS(A[5]) < ABS(A[6])) pivot = 2; if(ABS(A[pivot+4])< ABS(A[7])) pivot = 3; } else permute[--rank] = 1; } } else permute[--rank] = 1; } } if(rank > 1) { if(pivot == 2) { A[5] /= A[6]; t = A[10];A[10]= A[9] - A[5]*t; A[9] = t; t = A[14];A[14]= A[13]- A[5]*t; A[13]= t; t = b[2]; b[2] = b[1] - A[5]*t; b[1] = t; A[5] = A[6]; pivot = 1; } else { A[6] /= A[pivot+4]; A[10]= A[10]- A[6]*A[pivot+8]; A[14]= A[14]- A[6]*A[pivot+12]; b[2] = b[2] - A[6]*b[pivot]; } if(pivot == 3) { A[5] /= A[7]; t = A[11];A[11]= A[9] - A[5]*t; A[9] = t; t = A[15];A[15]= A[13]- A[5]*t; A[13]= t; t = b[3]; b[3] = b[1] - A[5]*t; b[1] = t; A[5] = A[7]; } else { A[7] /= A[pivot+4]; A[11]= A[11]- A[7]*A[pivot+8]; A[15]= A[15]- A[7]*A[pivot+12]; b[3] = b[3] - A[7]*b[pivot]; } } if(rank > 2) { pivot = (ABS(A[10]) < ABS(A[11]) ? 3 : 2); if(ABS(A[pivot+8]) <= eps) { if(rank > 3) { t = A[8]; A[8] = A[12]; A[12] = t; t = A[9]; A[9] = A[13]; A[13] = t; t = A[10];A[10]= A[14]; A[14] = t; t = A[11];A[11]= A[15]; A[15] = t; permute[--rank] = 2; pivot = (ABS(A[10]) < ABS(A[11]) ? 3 : 2); if(ABS(A[pivot+8])<= eps) { if(rank > 3) { t = A[8]; A[8] = A[12]; A[12] = t; t = A[9]; A[9] = A[13]; A[13] = t; t = A[10];A[10]= A[14]; A[14] = t; t = A[11];A[11]= A[15]; A[15] = t; permute[--rank] = 2; pivot = (ABS(A[10]) < ABS(A[11]) ? 
3 : 2); } else permute[--rank] = 2; } } else permute[--rank] = 2; } } if(rank > 2) { if(pivot == 3) { A[10] /= A[11]; t = A[15];A[15]= A[14]- A[10]*t; A[14]= t; t = b[3]; b[3] = b[2] - A[10]*t; b[2] = t; A[10] = A[11]; } else { A[11] /= A[pivot+8]; A[15]= A[15]- A[11]*A[pivot+12]; b[3] = b[3] - A[11]*b[pivot]; } if(rank > 3 && ABS(A[15]) <= eps) permute[--rank] = 3; } if(rank >= 4) { b[3] = b[3] / A[15]; } else if(ABS(b[3]) > eps) { valid = false; } if(rank >= 3) { b[2] = (b[2] - A[14]*b[3]) / A[10]; } else if(ABS(b[1]) > eps) { valid = false; } if(rank >= 2) { b[1] = (b[1] - A[9]*b[2] - A[13]*b[3]) / A[5]; } else if(ABS(b[1]) > eps) { valid = false; } if(rank >= 1) { b[0] = (b[0] - A[4]*b[1] - A[8]*b[2] - A[12]*b[3]) / A[0]; } else if(ABS(b[0]) > eps) { valid = false; } if(rank <= 1 && permute[1] != 1) { t = b[1]; b[1] = b[permute[1]]; b[permute[1]] = t; } if(rank <= 2 && permute[2] != 2) { t = b[2]; b[2] = b[permute[2]]; b[permute[2]] = t; } if(rank <= 3 && permute[3] != 3) { t = b[3]; b[3] = b[permute[3]]; b[permute[3]] = t; } return valid; } #endif ================================================ FILE: extensions/mesh_grid/mesh_grid.cpp ================================================ #include at::Tensor insert_grid_surface_cuda( at::Tensor verts, at::Tensor faces, at::Tensor minmax, at::Tensor num, float step, at::Tensor tri_num ); void search_nearest_point_cuda ( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor near_faces, at::Tensor near_pts, at::Tensor coeff ); void search_inside_mesh_cuda ( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor signs ); void search_intersect_cuda ( at::Tensor origins, at::Tensor directions, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor 
intersect ); #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") at::Tensor insert_grid_surface( at::Tensor verts, at::Tensor faces, at::Tensor minmax, at::Tensor num, float step, at::Tensor tri_num ) { CHECK_CUDA(verts); CHECK_CUDA(faces); CHECK_CUDA(minmax); CHECK_CUDA(num); CHECK_CUDA(tri_num); return insert_grid_surface_cuda(verts, faces, minmax, num, step, tri_num); } void search_nearest_point( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor near_faces, at::Tensor near_pts, at::Tensor coeff ) { CHECK_CUDA(points); CHECK_CUDA(verts); CHECK_CUDA(faces); CHECK_CUDA(tri_num); CHECK_CUDA(tri_idx); CHECK_CUDA(num); CHECK_CUDA(minmax); CHECK_CUDA(near_faces); CHECK_CUDA(coeff); search_nearest_point_cuda(points, verts, faces, tri_num, tri_idx, num, minmax, step, near_faces, near_pts, coeff); } void search_inside_mesh( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor signs ) { CHECK_CUDA(points); CHECK_CUDA(verts); CHECK_CUDA(faces); CHECK_CUDA(tri_num); CHECK_CUDA(tri_idx); CHECK_CUDA(num); CHECK_CUDA(minmax); CHECK_CUDA(signs); search_inside_mesh_cuda(points, verts, faces, tri_num, tri_idx, num, minmax, step, signs); } void search_intersect ( at::Tensor origins, at::Tensor directions, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor intersect ){ CHECK_CUDA(origins); CHECK_CUDA(directions); CHECK_CUDA(verts); CHECK_CUDA(faces); CHECK_CUDA(tri_num); CHECK_CUDA(tri_idx); CHECK_CUDA(num); CHECK_CUDA(minmax); CHECK_CUDA(intersect); search_intersect_cuda(origins, directions, verts, faces, tri_num, tri_idx, num, minmax, step, intersect); } at::Tensor cumsum( at::Tensor input ){ input.set_(input.cumsum(0)); // input.set_(at::zeros(input.sizes())); // 
input.zero_(); input = input.reshape({1,1,-1}); return input; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("insert_grid_surface", &insert_grid_surface, "INSERT_GRID_SURFACE (CUDA)"); m.def("search_nearest_point", &search_nearest_point, "SEARCH_NEAREST_POINT (CUDA)"); m.def("search_inside_mesh", &search_inside_mesh, "SEARCH_INSIDE_MESH (CUDA)"); m.def("search_intersect", &search_intersect, "SEARCH_INTERSECT (CUDA)"); m.def("cumsum", &cumsum, "RESHAPE_TENSOR"); } ================================================ FILE: extensions/mesh_grid/mesh_grid_kernel.cu ================================================ #include #include #include #include #include #include "matrix.h" #ifndef MAX #define MAX(a,b) ((a) < (b) ? (b) : (a)) #endif template __device__ scalar_t search_nearest_proj( const scalar_t patch[9], scalar_t coeff[3], scalar_t precision = 1e-9) { scalar_t p[29]; /* coeff[0] = coeff[1] = coeff[2] = 1./3; p[0] = coeff[0]*patch[0]+coeff[1]*patch[3]+coeff[2]*patch[6]; p[1] = coeff[0]*patch[1]+coeff[1]*patch[4]+coeff[2]*patch[7]; p[2] = coeff[0]*patch[2]+coeff[1]*patch[5]+coeff[2]*patch[8]; return p[0]*p[0]+p[1]*p[1]+p[2]*p[2]; */ unsigned char i = 0, j = 1, k = 2; for(i = 0; i < 3; ++i) for(j = i; j < 3; ++j) { p[20+j+3*i] = 0; for(k = 0; k < 3; ++k) p[20+j+3*i] += patch[k+i*3] * patch[k+j*3]; p[20+i+3*j] = p[20+j+3*i]; } p[0] = p[20]; p[1] = p[21]; p[2] = p[22]; p[3] = 1; p[4] = p[23]; p[5] = p[24]; p[6] = p[25]; p[7] = 1; p[8] = p[26]; p[9] = p[27]; p[10]= p[28]; p[11]= 1; p[12]= 1; p[13]= 1; p[14]= 1; p[15]= 0; p[16]= 0; p[17]= 0; p[18]= 0; p[19]= 1; if(!solve4(p, p+16, precision)) { p[0] = p[24]+p[28]-p[25]-p[27]; p[1] = p[28]+p[20]-p[26]-p[22]; p[2] = p[20]+p[24]-p[21]-p[23]; i = (p[0] < p[1] ? 1 : 0); i = (p[i] < p[2] ? 
2 : i); j = (i+1) % 3; k = 3-i-j; p[0] = p[20+4*j]; p[1] = p[20+3*j+k];p[2] = 1; p[3] = p[20+3*k+j];p[4] = p[20+4*k]; p[5] = 1; p[6] = 1; p[7] = 1; p[8] = 0; p[9] = 0; p[10]= 0; p[11]= 1; if(!solve3(p, p+9, precision)) { coeff[i] = 0; coeff[j] =.5; coeff[k] =.5; return (p[20+4*j]+p[20+4*k]) / 2; } else if(p[9] < 0) { coeff[i] = 0; coeff[j] = 0; coeff[k] = 1; return p[20+4*k]; } else if(p[10] < 0) { coeff[i] = 0; coeff[j] = 1; coeff[k] = 0; return p[20+4*j]; } else { coeff[i] = 0; coeff[j] = p[9]; coeff[k] = p[10]; return ABS(p[11]); } } else { i = (p[16] > p[17] ? 1 : 0); i = (p[16+i]> p[18] ? 2 : i); if(p[16+i] < 0) { j = (i+1) % 3; k = 3-i-j; p[0] = p[20+4*j]; p[1] = p[20+3*j+k];p[2] = 1; p[3] = p[20+3*k+j];p[4] = p[20+4*k]; p[5] = 1; p[6] = 1; p[7] = 1; p[8] = 0; p[9] = 0; p[10]= 0; p[11]= 1; solve3(p, p+9, precision); if(p[9] < 0) { coeff[i] = 0; coeff[j] = 0; coeff[k] = 1; return p[20+4*k]; } else if(p[10] < 0) { coeff[i] = 0; coeff[j] = 1; coeff[k] = 0; return p[20+4*j]; } else { coeff[i] = 0; coeff[j] = p[9]; coeff[k] = p[10]; return ABS(p[11]); } } else { coeff[0] = p[16]; coeff[1] = p[17]; coeff[2] = p[18]; return ABS(p[19]); } } } template __global__ void insert_grid_surface_kernel( const scalar_t *points, const index *_surf, index n, scalar_t step, const scalar_t _min[dim], const index num[dim], index *surf_num, index *surf_idx = NULL) { // const scalar_t step = _step[0]; const int id = blockIdx.x * blockDim.x + threadIdx.x; if(points == NULL || _surf == NULL || _min == NULL || num == NULL || surf_num == NULL || dim <= 0 || step <= 0 || n <= 0 || id >= n) return; const index *surf = _surf + id * dim; index bbox[dim * 2], bbox_num = 1; for(unsigned char d = 0; d < dim; ++d) { scalar_t minmax[2] = { points[dim*surf[0] + d], points[dim*surf[0] + d]}; for(unsigned char j = 1; j < dim; ++j) if(minmax[0] > points[dim*surf[j] + d]) minmax[0] = points[dim*surf[j] + d]; else if(minmax[1] < points[dim*surf[j] + d]) minmax[1] = points[dim*surf[j] + d]; scalar_t x = 
(minmax[0] - _min[d]) / step; bbox[d] = x < 0 ? 0 : (x >= num[d] ? num[d] - 1 : (index)floor(x)); x = (minmax[1] - _min[d]) / step; bbox[d+dim] =(x < 0 ? 0 : (x >= num[d] ? num[d] - 1 : (index)floor(x))) + 1; bbox_num *= (bbox[d+dim] - bbox[d]); } for(index j = 0; j < bbox_num; ++j) { index ind = 0, k = j; for(unsigned char d = 0; d < dim; ++d) { if(d > 0) ind *= num[d]; ind += (bbox[d] + k % (bbox[d+dim] - bbox[d])); k /= (bbox[d+dim] - bbox[d] + 1e-8); } if(surf_idx == NULL) // ++surf_num[ind]; atomicAdd(surf_num+ind, 1); else for(k = (ind == 0 ? 0 : surf_num[ind-1]); k < surf_num[ind]; ++k) if(atomicCAS(surf_idx+k, 0, id+1) == 0) { // surf_idx[k] = i + 1; // atomicExch(&surf_idx[k], i+1) break; } } } template void print_tensor(at::Tensor tensor){ int32_t size = tensor.size(0); if (size < 100) for (int i=0; i() << " "; } else{ // for (int i=0; i<3; i++) // std::cout << tensor[i].item() << " "; // std::cout << " ... "; // for (int i=-1; i>-4; i--) // std::cout << tensor[i].item() << " "; for (int i=0; i() << " "; std::cout << std::endl; } } std::cout << std::endl; } at::Tensor insert_grid_surface_cuda( at::Tensor verts, at::Tensor faces, at::Tensor minmax, at::Tensor num, float step, at::Tensor tri_num ) { if(faces.sizes().size() != 2) faces = faces.reshape({-1,3}); const int32_t num_faces = faces.size(0); const int threads = 512; const dim3 blocks (num_faces / threads + 1, 1, 1); tri_num.zero_(); // clear tri_num buffer AT_DISPATCH_FLOATING_TYPES(verts.type(), "insert_grid_surface_cuda", ([&] { insert_grid_surface_kernel<<>>( verts.data(), faces.data(), num_faces, step, minmax.data(), num.data(), tri_num.data(), NULL ); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in first insert_grid_surface_cuda: %s\n", cudaGetErrorString(err)); tri_num.set_(at::_cast_Int(tri_num.cumsum(0))); // cumsum determines the size of tri_idx buffer // make buffer const int32_t size = tri_num[-1].item(); // tri_idx.resize_({size}); // tri_idx.zero_(); 
at::Tensor tri_idx = at::zeros({size}, tri_num.options()); AT_DISPATCH_FLOATING_TYPES(verts.type(), "insert_grid_surface_cuda2", ([&] { insert_grid_surface_kernel<<>>( verts.data(), faces.data(), num_faces, step, minmax.data(), num.data(), tri_num.data(), tri_idx.data() ); })); err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in second insert_grid_surface_cuda: %s\n", cudaGetErrorString(err)); return tri_idx; } template __global__ void search_nearest_point_kenerel( const index *tri_num, const index *tri_idx, const index *size, const scalar_t *_min, scalar_t step, const scalar_t *points_base, const index *tri, const scalar_t *point_search_, const index points_num, scalar_t *coeff_ = NULL, scalar_t *proj_ = NULL, index *near_idx_ = NULL, scalar_t max_r2 = 0) { const int id = blockIdx.x * blockDim.x + threadIdx.x; const scalar_t *point_search = point_search_ + 3 * id; scalar_t *coeff = coeff_ + 3 * id; scalar_t *proj = proj_ + 3 * id; index *near_idx = near_idx_ + id; if(points_base == NULL || tri == NULL || point_search_ == NULL || tri_num == NULL || tri_idx == NULL || size == NULL || _min == NULL || step <= 0 || id >= points_num) return; index x[dim*2+1], maxLinf = 0, n = 1, nearest = tri_num[size[dim]-1]; for(unsigned char d = 0; d < dim; ++d) { scalar_t xf = (point_search[d] - _min[d]) / step; xf = (xf < 0 ? 0 :(xf >= size[d] ? size[d]-1 : floor(xf))); x[d] = (index)xf; x[dim] = d > 0 ? x[dim] * size[d] + x[d] : x[d]; if(x[d] > size[d] - x[d]) maxLinf = MAX(maxLinf, x[d]); else maxLinf = MAX(maxLinf, size[d]-x[d]); } scalar_t dist2 = 0, e = 0, dis2 = (max_r2 <= 0 ? -1 : max_r2); for(index Linf = 0; Linf < maxLinf; ++Linf) { n = 1; for(unsigned char d = 1; d < dim; ++d) n *= (2*Linf+1); for(index f = 0; f < (Linf == 0 ? 1 : 2*dim); ++f) { x[dim+1+f%dim] = f < dim ? 
-Linf : Linf; for(index k = 0; k < n; ++k) { index i, j = k; for(unsigned char d = 1; d < dim; ++d) { if(d+f >= 2*dim) { x[dim+1+(d+f)%dim] = j%(2*Linf-1) - Linf + 1; j = j / (2*Linf-1); } else if(d+f >= dim) { x[dim+1+(d+f)%dim] = j%(2*Linf) - Linf + 1; j = j / (2*Linf); } else { x[dim+1+(d+f)%dim] = j%(2*Linf+1) - Linf; j = j / (2*Linf+1); } } dist2 = 0; for(unsigned char d = 0; d < dim; ++d) { index y = x[d] + x[dim+1+d]; if(y < 0 || y >= size[d]) { x[dim] = size[dim]; break; } if(x[dim+1+d] < 0) { e = point_search[d] - _min[d] - step*(y+1); dist2 += e * e; } else if(x[dim+1+d] > 0) { e =-point_search[d] + _min[d] + step*y; dist2 += e * e; } x[dim] = d > 0 ? x[dim] * size[d] + y : y; } if(x[dim] >= size[dim]) continue; if(dis2 >= 0 && dis2 < dist2) continue; // Find closest point and distance in a triangle face for(i = x[dim] == 0 ? 0 : tri_num[x[dim]-1]; i < tri_num[x[dim]]; ++i) { scalar_t patch[dim * dim]; scalar_t _coeff[dim] = {0.33,0.33,0.33}; for(unsigned char d = 0; d < dim; ++d){ for(unsigned char d_= 0; d_< dim; ++d_){ patch[d_+ d*dim] = points_base[d_+dim* tri[d+dim*tri_idx[i]-dim]] - point_search[d_]; } } dist2 = search_nearest_proj(patch, _coeff); // printf("%d: %f %f %f\n", (int)threadIdx.x, _coeff[0], _coeff[1], _coeff[2]); if(dis2 < 0 || dist2 < dis2) { if(coeff != NULL) { coeff[0] = _coeff[0]; coeff[1] = _coeff[1]; coeff[2] = _coeff[2]; proj[0] = point_search[0] + coeff[0]*patch[0] + coeff[1]*patch[3] + coeff[2]*patch[6]; proj[1] = point_search[1] + coeff[0]*patch[1] + coeff[1]*patch[4] + coeff[2]*patch[7]; proj[2] = point_search[2] + coeff[0]*patch[2] + coeff[1]*patch[5] + coeff[2]*patch[8]; } nearest = tri_idx[i] - 1; dis2 = dist2; } } } if(f < dim-1) n = n / (2*Linf+1) * (2*Linf); else if(f >= dim) n = n / (2*Linf) * (2*Linf-1); } if(dis2 >= 0 && dis2 < Linf*Linf*step*step) break; } // return nearest; near_idx[0] = nearest; } void search_nearest_point_cuda ( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor 
tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor near_faces, at::Tensor near_pts, at::Tensor coeff ) { if(points.sizes().size() != 2) points = points.reshape({-1,3}); int32_t points_num = points.size(0); const int threads = 512; const dim3 blocks (points_num / threads + 1, 1, 1); // make output // near_faces.resize_({points_num}); // near_faces.zero_(); // near_pts.resize_({points_num, 3}); // near_pts.zero_(); // coeff.resize_({points_num, 3}); // coeff.zero_(); AT_DISPATCH_FLOATING_TYPES(verts.type(), "search_nearest_point_cuda", ([&] { search_nearest_point_kenerel<<>>( tri_num.data(), tri_idx.data(), num.data(), minmax.data(), step, verts.data(), faces.data(), points.data(), points_num, coeff.data(), near_pts.data(), near_faces.data() ); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in search_nearest_point_cuda: %s\n", cudaGetErrorString(err)); } template bool __device__ intersect_tri( const scalar_t* src, unsigned char dir, scalar_t* patch, unsigned char dim ) { if(dir > 2 * dim) return false; bool intersect = false; scalar_t patch_[6], det = 0; switch(dir % 2) { case 1: for(unsigned char d = 0; d < dim; ++d) if(patch[dir/2+dim*d] > src[dir/2]) { intersect = true; break; } if(!intersect) return false; break; default:for(unsigned char d = 0; d < dim; ++d) if(patch[dir/2+dim*d] < src[dir/2]) { intersect = true; break; } if(!intersect) return false; break;} if(dim <= 1) { return true; } else if(dim > 2) { unsigned char r = 0; for(unsigned char d = 0; d < dim; ++d) { for(unsigned char i = 0; i < dim-1; ++i) { patch_[i] = src[(i+1+dir/2)%dim]; for(unsigned char j = 1; j < dim; ++j) patch_[i+(dim-1)*j] = patch[(i+1+dir/2)%dim+dim*((j+d)%dim)]; } r += intersect_tri(patch_, 0, patch_+dim-1, dim-1); } if(r % 2 == 0) return false; } for(unsigned char i = 0; i < dim*dim; ++i) patch[i] -= src[i%dim]; /* For 3-dimension, dir % 2 == 0, dir / 2 == 0, we have [Xa Xb Xc 1][ Ca ] [X] Ca >= 0 [Ya Yb Yc 0][ Cb ] = [Y], Cb >= 0 
[Za Zb Zc 0][ Cc ] [Z] Cc >= 0 [ 1 1 1 0][lambda] [1] lambda>= 0 solve [Xa-X Xb-X Xc-X 1]-1[0] [Ya-Y Yb-Y Yc-Y 0] [0] [Za-Z Zb-Z Zc-Z 0] [0] >= 0 [ 1 1 1 0] [1] For arbitrary case, (i = dir/2) [V ei]-1[0] = V^-1ei ( bigger than 0 if dir%2==0 else 1) [e^T 0] [1] e^TV^-1ei */ switch(dim) { case 2: patch_[0] = (dir/2==0 ? patch[3]:-patch[2]); patch_[1] = (dir/2==0 ?-patch[1]: patch[0]); det = patch[0]*patch[3] - patch[1]*patch[2]; break; case 3: patch_[0] = (dir/2==0 ? patch[4]*patch[8]-patch[5]*patch[7] : (dir/2==1 ? patch[5]*patch[6]-patch[3]*patch[8] : patch[3]*patch[7]-patch[4]*patch[6])); patch_[1] = (dir/2==0 ? patch[2]*patch[7]-patch[1]*patch[8] : (dir/2==1 ? patch[0]*patch[8]-patch[2]*patch[6] : patch[1]*patch[6]-patch[0]*patch[7])); patch_[2] = (dir/2==0 ? patch[1]*patch[5]-patch[2]*patch[4] : (dir/2==1 ? patch[2]*patch[3]-patch[0]*patch[5] : patch[0]*patch[4]-patch[1]*patch[3])); det = patch_[0]*patch[dir/2] + patch_[1]*patch[dir/2+3] + patch_[2]*patch[dir/2+6]; break; default:for(unsigned char d = 0; d < dim; ++d) patch_[d] = (d == dir/2) ? 
1 : 0; // Gauss elimination for(unsigned char i = 0; i < dim; ++i) { unsigned char pivot = i; for(unsigned char j = i + 1; j < dim; ++j) if(ABS(patch[pivot+dim*i]) < ABS(patch[j+dim*i])) pivot = j; if(ABS(patch[pivot+dim*i]) <= 0) return false; for(unsigned char j = 0; j < dim; ++j) if(j != pivot) { scalar_t factor = patch[j+dim*i] / patch[pivot+dim*i]; for(unsigned char k = i+1; k < dim; ++k) patch[j+dim*k] -= factor * patch[pivot+dim*k]; patch_[j] -= factor * patch[pivot]; } if(i != pivot) { for(unsigned char k = i; k < dim; ++k) { det = patch[i+dim*k]; patch[i+dim*k] = patch[pivot+dim*k]; patch[pivot+dim*k] = det; } det = patch_[i]; patch_[i] = patch_[pivot]; patch_[pivot] = det; } } det = 1; break;} if(det == 0) return false; intersect = (det > 0) ^ (dir % 2); for(unsigned char d = 0; d < dim; ++d) if(intersect ^ (patch_[d] < 0)) return false; return true; } template void __global__ search_inside_mesh_kernel(const index *tri_num, const index *tri_idx, const index *size, const scalar_t *_min, scalar_t step, const scalar_t *points_base, const index *tri, const scalar_t *points_query, const index points_num, scalar_t *signs) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(points_base == NULL || tri == NULL || points_query == NULL || tri_num == NULL || tri_idx == NULL || size == NULL || _min == NULL || step <= 0 || id >= points_num) return; const scalar_t *point = points_query + 3 * id; scalar_t *sign = signs + id; index x[dim+1], to_end[2*dim]; scalar_t patch[dim*dim]; unsigned char out_dim = 0; for(unsigned char d = 0; d < dim; ++d) { scalar_t xf = (point[d] - _min[d]) / step; if(xf < 0 || xf >= size[d]){ // return false; sign[0] = -1; return; } x[d] = (index)xf; to_end[2*d] = x[d]; to_end[2*d+1]= size[d]-1-x[d]; x[dim] = d > 0 ? 
x[dim] * size[d] + x[d] : x[d]; } for(unsigned char d = 1; d < 2*dim; ++d) if(to_end[d] < to_end[out_dim]) out_dim = d; // std::vector visited(1, 0); // thrust::device_vector visited(1, 0); index visited[16] = {}; index visited_size = 1; for(index i = 0; i <= to_end[out_dim]; ++i) { for(index j =(x[dim]==0?0:tri_num[x[dim]-1]); j < tri_num[x[dim]]; ++j) { for(unsigned char d = 0; d < dim; ++d) for(unsigned char d_= 0; d_< dim; ++d_) patch[d_+ d*dim] = points_base[d_+dim* tri[d+dim*tri_idx[j]-dim]]; if(intersect_tri(point, out_dim, patch, dim)) { bool find = false; for(index t = 1; t < visited_size; ++t) if(visited[t] == tri_idx[j]-1) { find = true; break; } if(!find) { // visited.resize(visited.size()+1); // visited[visited.size()-1] = tri_idx[j]-1; if(visited_size < sizeof(visited)/sizeof(visited[0])) visited[visited_size++] = tri_idx[j]-1; else { for(index i = 1; i+1 < sizeof(visited)/sizeof(visited[0]); ++i) visited[i] = visited[i+1]; visited[sizeof(visited)/sizeof(visited[0])-1] = tri_idx[j]-1; visited_size++; } } } } if(out_dim % 2 == 1) ++x[out_dim/2]; else --x[out_dim/2]; for(unsigned char d = 0; d < dim; ++d) x[dim] = d > 0 ? x[dim] * size[d] + x[d] : x[d]; } // return visited.size()-1; sign[0] = ((visited_size) % 2 == 0) ? 
1 : -1; } void search_inside_mesh_cuda ( at::Tensor points, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor signs ) { if(points.sizes().size() != 2) points = points.reshape({-1,3}); int32_t points_num = points.size(0); const int threads = 512; const dim3 blocks (points_num / threads + 1, 1, 1); // make output // signs.resize_({points_num}); // signs.zero_(); AT_DISPATCH_FLOATING_TYPES(verts.type(), "search_inside_mesh_cuda", ([&] { search_inside_mesh_kernel<<>>( tri_num.data(), tri_idx.data(), num.data(), minmax.data(), step, verts.data(), faces.data(), points.data(), points_num, signs.data() ); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in search_inside_mesh_cuda: %s\n", cudaGetErrorString(err)); } template unsigned char __device__ ray_intersect_grid( const scalar_t start[dim], const scalar_t direction[dim], scalar_t step, const scalar_t min_[dim], const index num[dim + 1], index ind, bool first = false, scalar_t inter_point[dim] = NULL) { scalar_t _min[dim], _max[dim]; if(ind < num[dim]) { for(unsigned char d = dim - 1; d > 0; ind /= num[d--]) _max[d] = (_min[d] = min_[d] + step * (ind % num[d])) + step; _max[0] = (_min[0] = min_[0] + step * ind) + step; } else for(unsigned char d = 0; d < dim; ++d) _max[d] = (_min[d] = min_[d]) + step * num[d]; scalar_t min_dot = -1, point[dim]; unsigned out_dim = 2 * dim; for(unsigned char d = 0; d < dim; ++d) { const scalar_t *inter; if(first) { if(start[d] < _min[d] && direction[d] > 0) inter = _min; else if(start[d] > _max[d] && direction[d] < 0) inter = _max; else if(start[d] > _min[d] && direction[d] < 0) inter = _min; else if(start[d] < _max[d] && direction[d] > 0) inter = _max; else continue; } else { if(direction[d] > 0) inter = _max; else if(direction[d] < 0) inter = _min; else continue; } scalar_t dot = (inter[d] - start[d]) / direction[d]; if(dot < 0) continue; for(unsigned char d_= 0; d_< dim; 
++d_) if(d_ != d) { point[d_] = start[d_] + direction[d_] * dot; if(point[d_] < _min[d_] || point[d_] > _max[d_]) { dot = min_dot; break; } } else point[d_] = inter[d_]; if(dot >= 0 && (min_dot < 0 || dot < min_dot)) { min_dot = dot; out_dim = 2 * d + (inter == _max); if(inter_point != NULL) for(unsigned char d_= 0; d_< dim; ++d_) inter_point[d_] = point[d_]; } } return out_dim; } template __device__ bool intersect_tri2(const scalar_t src[3], const scalar_t dir[3], const scalar_t va[3], const scalar_t vb[3], const scalar_t vc[3], scalar_t coeff[3] = NULL, bool both_direction = false, scalar_t precision = 1e-9) { scalar_t A[] = { va[0]-src[0], vb[0]-src[0], vc[0]-src[0], -dir[0], va[1]-src[1], vb[1]-src[1], vc[1]-src[1], -dir[1], va[2]-src[2], vb[2]-src[2], vc[2]-src[2], -dir[2], 1, 1, 1, 0}, A3inv[9], Ainv[4]; A3inv[0] = A[5]*A[10]- A[6]*A[9]; A3inv[1] = A[2]*A[9] - A[1]*A[10]; A3inv[2] = A[1]*A[6] - A[2]*A[5]; A3inv[3] = A[6]*A[8] - A[4]*A[10]; A3inv[4] = A[0]*A[10]- A[2]*A[8]; A3inv[5] = A[2]*A[4] - A[0]*A[6]; A3inv[6] = A[4]*A[9] - A[5]*A[8]; A3inv[7] = A[1]*A[8] - A[0]*A[9]; A3inv[8] = A[0]*A[5] - A[1]*A[4]; Ainv[0] =-A[3]*A3inv[0] - A[7]*A3inv[1] - A[11]*A3inv[2]; Ainv[1] =-A[3]*A3inv[3] - A[7]*A3inv[4] - A[11]*A3inv[5]; Ainv[2] =-A[3]*A3inv[6] - A[7]*A3inv[7] - A[11]*A3inv[8]; Ainv[3] = A[0]*A3inv[0] + A[4]*A3inv[1] + A[8]*A3inv[2]; scalar_t det = Ainv[0] + Ainv[1] + Ainv[2]; if(det > precision || det < -precision) { if(coeff != NULL) { coeff[0] = Ainv[0] / det; coeff[1] = Ainv[1] / det; coeff[2] = Ainv[2] / det; // coeff[3] = Ainv[3] / det; } if(det < 0) { for(unsigned i = 0; i < 4; ++i) Ainv[i] = -Ainv[i]; det = -det; } return Ainv[0] >=-precision && Ainv[1] >=-precision && Ainv[2] >=-precision && (both_direction || Ainv[3] >=-precision); } else { scalar_t norm = A[3]*A[3] + A[7]*A[7] + A[11]*A[11], S[] = { A3inv[0] + A3inv[3] + A3inv[6], A3inv[1] + A3inv[4] + A3inv[7], A3inv[2] + A3inv[5] + A3inv[8]}, area = S[0]*S[0] + S[1]*S[1] + S[2]*S[2]; if(norm <= 
precision) { // direction degenerate to a point if(area > precision) { Ainv[0] = A3inv[0]*S[0]+A3inv[1]*S[1]+A3inv[2]*S[2]; Ainv[1] = A3inv[3]*S[0]+A3inv[4]*S[1]+A3inv[5]*S[2]; Ainv[2] = A3inv[6]*S[0]+A3inv[7]*S[1]+A3inv[8]*S[2]; if(coeff != NULL) { coeff[0] = Ainv[0] / area; coeff[1] = Ainv[1] / area; coeff[2] = Ainv[2] / area; // coeff[3] = 0; } return Ainv[0] >=-precision && Ainv[1] >=-precision && Ainv[2] >=-precision && Ainv[3] >=-precision && Ainv[3] <= precision; } else { scalar_t e[] = { vc[0]-vb[0], vc[1]-vb[1], vc[2]-vb[2], va[0]-vc[0], va[1]-vc[1], va[2]-vc[2], vb[0]-va[0], vb[1]-va[1], vb[2]-va[2]}, l[] = { e[0]*e[0] + e[1]*e[1] + e[2]*e[2], e[3]*e[3] + e[4]*e[4] + e[5]*e[5], e[6]*e[6] + e[7]*e[7] + e[8]*e[8]}; unsigned i = (l[0] < l[1] ? 1 : 0), j, k; i = (l[i] < l[2] ? 2 : i); j = (i+1) % 3; k = (i+2) % 3; if(l[i] > precision) { // triangle degenerate to a segment Ainv[i] = A3inv[3*i] * A3inv[3*i] + A3inv[3*i+1] * A3inv[3*i+1]+ A3inv[3*i+2] * A3inv[3*i+2]; Ainv[j] = A[k]*e[3*i] + A[k+4]*e[3*i+1] + A[k+8]*e[3*i+2]; Ainv[k] =-A[j]*e[3*i] - A[j+4]*e[3*i+1] - A[j+8]*e[3*i+2]; if(coeff != NULL) { coeff[i] = 0; coeff[j] = Ainv[j] / l[i]; coeff[k] = Ainv[k] / l[i]; // coeff[3] = 0; } return Ainv[i] <= precision && Ainv[j] >=-precision && Ainv[k] >=-precision && Ainv[3] >=-precision && Ainv[3] <= precision; } else { // triangle degenerate to a point Ainv[i] = A[i]*A[i] + A[i+4]*A[i+4] + A[i+8]*A[i+8]; if(coeff != NULL) { coeff[i] = 1; coeff[j] = 0; coeff[k] = 0; // coeff[3] = 0; } return Ainv[i] <= precision && Ainv[3] >=-precision && Ainv[3] <= precision; } } } else { if(area <= precision) { scalar_t e[] = { vc[0]-vb[0], vc[1]-vb[1], vc[2]-vb[2], va[0]-vc[0], va[1]-vc[1], va[2]-vc[2], vb[0]-va[0], vb[1]-va[1], vb[2]-va[2]}, l[] = { e[0]*e[0] + e[1]*e[1] + e[2]*e[2], e[3]*e[3] + e[4]*e[4] + e[5]*e[5], e[6]*e[6] + e[7]*e[7] + e[8]*e[8]}; unsigned i = (l[0] < l[1] ? 1 : 0), j, k; i = (l[i] < l[2] ? 
2 : i); j = (i+1) % 3; k = (i+2) % 3; if(l[i] <= precision) { // triangle degenerate to a point scalar_t cross[] = { A[i+4]*A[11]-A[i+8]*A[7], A[i+8]*A[3] -A[i] *A[11], A[i] *A[7] -A[i+4]*A[3]}; Ainv[i]=cross[0] * cross[0] + cross[1] * cross[1] + cross[2] * cross[2]; Ainv[3]=-A[i]*A[3] - A[i+4]*A[7] - A[i+8]*A[11]; if(coeff != NULL) { coeff[i] = 1; coeff[j] = 0; coeff[k] = 0; // coeff[3] = Ainv[3] / norm; } return Ainv[i] <= precision && (both_direction || Ainv[3] >=-precision); } else { // triangle degenerate to a segment scalar_t norm_ = A3inv[3*i] * A3inv[3*i] + A3inv[3*i+1]* A3inv[3*i+1]+ A3inv[3*i+2]* A3inv[3*i+2]; if(norm_ > precision) { scalar_t cross[] = { A[j+4]*A[11]-A[j+8]*A[7], A[j+8]*A[3] -A[j] *A[11], A[j] *A[7] -A[j+4]*A[3], A[k+4]*A[11]-A[k+8]*A[7], A[k+8]*A[3] -A[k] *A[11], A[k] *A[7] -A[k+4]*A[3]}; Ainv[j] = A3inv[3*i] * cross[3] + A3inv[3*i+1] * cross[4] + A3inv[3*i+2] * cross[5]; Ainv[k] =-A3inv[3*i] * cross[0] - A3inv[3*i+1] * cross[1] - A3inv[3*i+2] * cross[2]; Ainv[3] = Ainv[j] + Ainv[k]; } else { // starting point is on the segment Ainv[j] = A[k]*e[3*i] + A[k+4]*e[3*i+1] + A[k+8]*e[3*i+2]; Ainv[k] =-A[j]*e[3*i] - A[j+4]*e[3*i+1] - A[j+8]*e[3*i+2]; Ainv[3] = l[i]; } if(coeff != NULL) { if(Ainv[3] >=-precision && Ainv[3] <= precision) Ainv[3] = precision; coeff[i] = 0; coeff[j] = Ainv[j] / Ainv[3]; coeff[k] = Ainv[k] / Ainv[3]; // coeff[3] = norm_ / Ainv[3]; } return Ainv[i] >=-precision && Ainv[i] <= precision && Ainv[j] >=-precision && Ainv[k] >=-precision && (both_direction || Ainv[3] > precision); } } else { // direction parallel to triangle Ainv[0] = A3inv[0]*S[0]+A3inv[1]*S[1]+A3inv[2]*S[2]; Ainv[1] = A3inv[3]*S[0]+A3inv[4]*S[1]+A3inv[5]*S[2]; Ainv[2] = A3inv[6]*S[0]+A3inv[7]*S[1]+A3inv[8]*S[2]; unsigned i = (Ainv[0] < Ainv[1] ? 0 : 1), j, k; i = (Ainv[i] < Ainv[2] ? 
i : 2); j = (i+1) % 3; k = (i+2) % 3; if(Ainv[k] < -precision) { k = j; j = i; i = 3 - j - k; } if(Ainv[j] < -precision) { scalar_t cross[] = { A[i+4]*A[11]-A[i+8]*A[7], A[i+8]*A[3] -A[i] *A[11], A[i] *A[7] -A[i+4]*A[3], A[j+4]*A[11]-A[j+8]*A[7], A[j+8]*A[3] -A[j] *A[11], A[j] *A[7] -A[j+4]*A[3], A[k+4]*A[11]-A[k+8]*A[7], A[k+8]*A[3] -A[k] *A[11], A[k] *A[7] -A[k+4]*A[3]}; scalar_t dot[] = { A3inv[3*i] * cross[6] + A3inv[3*i+1]* cross[7] + A3inv[3*i+2]* cross[8], -A3inv[3*i] * cross[3] - A3inv[3*i+1]* cross[4] - A3inv[3*i+2]* cross[5], A3inv[3*j] * cross[0] + A3inv[3*j+1]* cross[1] + A3inv[3*j+2]* cross[2], -A3inv[3*j] * cross[6] - A3inv[3*j+1]* cross[7] - A3inv[3*j+2]* cross[8]}; scalar_t sum[] = {dot[0]+dot[1], dot[2]+dot[3]}; scalar_t norm[]= { A3inv[3*i] * A3inv[3*i] + A3inv[3*i+1]* A3inv[3*i+1]+ A3inv[3*i+2]* A3inv[3*i+2], A3inv[3*j] * A3inv[3*j] + A3inv[3*j+1]* A3inv[3*j+1]+ A3inv[3*j+2]* A3inv[3*j+2]}; bool valid[] = { dot[0] >=-precision && dot[1] >=-precision && (both_direction || norm[0] > precision), dot[2] >=-precision && dot[3] >=-precision && (both_direction || norm[1] > precision)}; if(coeff != NULL) { if(valid[0]) { coeff[i] = 0; coeff[j] = dot[0] / sum[0]; coeff[k] = dot[1] / sum[0]; // coeff[3] = norm[0] / sum[0]; } else { coeff[i] = dot[3] / sum[1]; coeff[j] = 0; coeff[k] = dot[2] / sum[1]; // coeff[3] = norm[1] / sum[1]; } } return (valid[0] || valid[1]) && Ainv[3] >=-precision && Ainv[3] <= precision; } else if(Ainv[i] < -precision) { scalar_t cross[] = { A[j+4]*A[11]-A[j+8]*A[7], A[j+8]*A[3] -A[j] *A[11], A[j] *A[7] -A[j+4]*A[3], A[k+4]*A[11]-A[k+8]*A[7], A[k+8]*A[3] -A[k] *A[11], A[k] *A[7] -A[k+4]*A[3]}; Ainv[j] = A3inv[3*i] * cross[3] + A3inv[3*i+1] * cross[4] + A3inv[3*i+2] * cross[5]; Ainv[k] =-A3inv[3*i] * cross[0] - A3inv[3*i+1] * cross[1] - A3inv[3*i+2] * cross[2]; Ainv[i] = Ainv[j] + Ainv[k]; // scalar_t norm_ = // A3inv[3*i] * A3inv[3*i] + // A3inv[3*i+1]* A3inv[3*i+1]+ // A3inv[3*i+2]* A3inv[3*i+2]; if(coeff != NULL) { if(Ainv[i] 
>=-precision && Ainv[i] <= precision) Ainv[i] = precision; coeff[i] = 0; coeff[j] = Ainv[j] / Ainv[i]; coeff[k] = Ainv[k] / Ainv[i]; // coeff[3] = norm_ / Ainv[i]; } return Ainv[j] >=-precision && Ainv[k] >=-precision && Ainv[3] >=-precision && Ainv[3] <= precision && (both_direction || Ainv[i] > precision); } else if(coeff != NULL) { coeff[0] = Ainv[0] / area; coeff[1] = Ainv[1] / area; coeff[2] = Ainv[2] / area; // coeff[3] = 0; } return Ainv[i] >=-precision && Ainv[3] >=-precision && Ainv[3] <= precision; } } } } template __global__ void search_ray_grid_kernel( const index *tri_num, const index *tri_idx, const index *size, const scalar_t *_min, scalar_t step, const scalar_t *points_base, const index *tri, const scalar_t *_origin, const scalar_t *_direction, bool *_valid, index points_num, scalar_t *coeff = NULL, index exclude_ind = 0, bool both_dir = false, scalar_t max_r2 = 0) { // const unsigned char dim = 3; const int id = blockIdx.x * blockDim.x + threadIdx.x; const scalar_t precision = 1e-9; if(points_base == NULL || tri == NULL || _origin == NULL || _direction == NULL || tri_num == NULL || tri_idx == NULL || size == NULL || _min == NULL || step <= 0 || id >= points_num || _valid == NULL) return; bool *valid = _valid + id; const scalar_t *origin = _origin + id * 3; const scalar_t *direction = _direction + id * 3; index inter_ind = tri_num[size[dim]-1], x[dim*2+2]; scalar_t inter_point[dim], _coeff[dim+1], direction_[] = {-direction[0],-direction[1],-direction[2]}, dist2 = 0, e = 0, dis2 = (max_r2 <= 0 ? -1 : max_r2); unsigned char out_dim[2] = {0, 2 * dim}; for(unsigned char d = 0; d < dim; ++d) { dist2 += direction[d] * direction[d]; scalar_t xf = (origin[d] - _min[d]) / step; if(xf < 0 || xf >= size[d]) { x[dim] = size[dim]; break; } x[dim+1+d] = x[d] = (index)xf; x[dim] = d > 0 ? 
x[dim] * size[d] + x[d] : x[d]; } if(dist2 < precision) { valid[0] = (inter_ind != tri_num[size[dim]-1]); return; } if(x[dim] >= size[dim]) { out_dim[0] = ray_intersect_grid(origin, direction, step, _min, size, size[dim], true, inter_point); if(out_dim[0] >= 2 * dim && !both_dir){ valid[0] = false; return; } for(unsigned char d = 0; d < dim; ++d) { scalar_t xf = (inter_point[d] - _min[d]) / step; xf = (xf < 0 ? 0 :(xf >= size[d] ? size[d]-1 : floor(xf))); x[d] = (index)xf; x[dim] = d > 0 ? x[dim] * size[d] + x[d] : x[d]; } if(both_dir) { out_dim[1] = ray_intersect_grid(origin,direction_, step, _min, size, size[dim], true, inter_point); if(out_dim[1] < 2 * dim) { for(unsigned char d = 0; d < dim; ++d) { scalar_t xf = (inter_point[d] - _min[d]) / step; xf = (xf < 0 ? 0 :(xf >= size[d]?size[d]-1:floor(xf))); x[dim+1+d] = (index)xf; x[dim+1+dim] = d > 0 ? x[dim*2+1]*size[d] + x[dim+1+d] : x[dim+1+d]; } } else if(out_dim[0] >= 2 * dim){ valid[0] = (inter_ind != tri_num[size[dim]-1]); return; } } } else if(both_dir) { out_dim[1] = 0; x[dim*2+1] = x[dim]; } while(out_dim[0] < 2 * dim || out_dim[1] < 2 * dim) { for(index j = (x[dim]==0?0:tri_num[x[dim]-1]); j < tri_num[x[dim]]; ++j) { if(exclude_ind > 0) { if(tri[dim*tri_idx[j]-3] == exclude_ind-1 || tri[dim*tri_idx[j]-2] == exclude_ind-1 || tri[dim*tri_idx[j]-1] == exclude_ind-1) continue; } else if(exclude_ind+1+tri_idx[j] == 0) continue; if(intersect_tri2(origin, direction, points_base + dim * tri[dim * tri_idx[j] - 3], points_base + dim * tri[dim * tri_idx[j] - 2], points_base + dim * tri[dim * tri_idx[j] - 1], _coeff, false, precision)) { dist2 = 0; for(unsigned char d = 0; d < dim; ++d) { inter_point[d] = _coeff[0]*points_base[d+dim*tri[dim*tri_idx[j]-3]]+ _coeff[1]*points_base[d+dim*tri[dim*tri_idx[j]-2]]+ _coeff[2]*points_base[d+dim*tri[dim*tri_idx[j]-1]]; e = inter_point[d] - origin[d]; dist2 += e * e; } out_dim[0] = 2 * dim; if(dis2 < 0 || dist2 < dis2) { if(coeff != NULL) for(unsigned char d = 0; d < dim; ++d) 
coeff[d] = _coeff[d]; inter_ind = tri_idx[j] - 1; dist2 = dis2; } } } if(out_dim[1] < 2 * dim) { for(index j = (x[dim*2+1]==0?0:tri_num[x[dim*2+1]-1]); j < tri_num[x[dim*2+1]]; ++j) { if(exclude_ind > 0) { if(tri[dim*tri_idx[j]-3] == exclude_ind-1 || tri[dim*tri_idx[j]-2] == exclude_ind-1 || tri[dim*tri_idx[j]-1] == exclude_ind-1) continue; } if(intersect_tri2(origin, direction_, points_base + dim * tri[dim * tri_idx[j] - 3], points_base + dim * tri[dim * tri_idx[j] - 2], points_base + dim * tri[dim * tri_idx[j] - 1], _coeff, false, precision)) { dist2 = 0; for(unsigned char d = 0; d < dim; ++d) { inter_point[d] = _coeff[0]*points_base[d+dim*tri[dim*tri_idx[j]-3]]+ _coeff[1]*points_base[d+dim*tri[dim*tri_idx[j]-2]]+ _coeff[2]*points_base[d+dim*tri[dim*tri_idx[j]-1]]; e = inter_point[d] - origin[d]; dist2 += e * e; } out_dim[1] = 2 * dim; if(dis2 < 0 || dist2 < dis2) { if(coeff != NULL) for(unsigned char d = 0; d < dim; ++d) coeff[d] = _coeff[d]; inter_ind = tri_idx[j] - 1; dist2 = dis2; } } } if(out_dim[1] < 2 * dim) { out_dim[1] = ray_intersect_grid(origin,direction_, step, _min, size, x[dim*2+1], false, inter_point); if(dis2 >= 0) { dist2 = 0; for(unsigned char d = 0; d < dim; ++d) { e = inter_point[d] - origin[d]; dist2 += e * e; } if(dist2 > dis2) out_dim[1] = 2 * dim; } if(out_dim[1] < 2 * dim) { if(out_dim[1] % 2 == 1) { if(x[dim+1+out_dim[1]/2] == size[out_dim[1]/2] - 1) out_dim[1] = 2 * dim; else ++x[dim+1+out_dim[1]/2]; } else { if(x[dim+1+out_dim[1]/2] == 0) out_dim[1] = 2 * dim; else --x[dim+1+out_dim[1]/2]; } } } if(out_dim[1] < 2 * dim) { for(unsigned char d = 0; d < dim; ++d) x[dim*2+1] = d > 0 ? 
x[dim*2+1]*size[d]+x[dim+1+d]:x[dim+1+d]; } else if(out_dim[0] >= 2 * dim) { valid[0] = (inter_ind != tri_num[size[dim]-1]); return; } } else if(out_dim[0] >= 2 * dim){ valid[0] = (inter_ind != tri_num[size[dim]-1]); return; } out_dim[0] = ray_intersect_grid(origin, direction, step, _min, size, x[dim], false, inter_point); if(dis2 >= 0) { dist2 = 0; for(unsigned char d = 0; d < dim; ++d) { e = inter_point[d] - origin[d]; dist2 += e * e; } if(dist2 > dis2) out_dim[0] = 2 * dim; } if(out_dim[0] < 2 * dim) { if(out_dim[0] % 2 == 1) { if(x[out_dim[0]/2] == size[out_dim[0]/2] - 1) out_dim[0] = 2 * dim; else ++x[out_dim[0]/2]; } else { if(x[out_dim[0]/2] == 0) out_dim[0] = 2 * dim; else --x[out_dim[0]/2]; } if(out_dim[0] < 2 * dim) for(unsigned char d = 0; d < dim; ++d) x[dim] = d > 0 ? x[dim]*size[d]+x[d] : x[d]; } } valid[0] = (inter_ind != tri_num[size[dim]-1]); return; } void search_intersect_cuda ( at::Tensor origins, at::Tensor directions, at::Tensor verts, at::Tensor faces, at::Tensor tri_num, at::Tensor tri_idx, at::Tensor num, at::Tensor minmax, float step, at::Tensor intersect ) { if(origins.sizes().size() != 2) origins = origins.reshape({-1,3}); if(directions.sizes().size() != 2) directions = directions.reshape({-1,3}); int32_t points_num = origins.size(0); const int threads = 512; const dim3 blocks (points_num / threads + 1, 1, 1); // make output // intersect.resize_({points_num}); // intersect.zero_(); AT_DISPATCH_FLOATING_TYPES(verts.type(), "search_intersect_cuda", ([&] { search_ray_grid_kernel<<>>( tri_num.data(), tri_idx.data(), num.data(), minmax.data(), step, verts.data(), faces.data(), origins.data(), directions.data(), intersect.data(), points_num ); })); // __global__ void search_ray_grid_kernel( // const index *tri_num, const index *tri_idx, // const index *size, const scalar_t *_min, scalar_t step, // const scalar_t *points_base, const index *tri, // const scalar_t *_origin, const scalar_t *_direction, // bool *_valid, index points_num, // 
scalar_t *coeff = NULL, index exclude_ind = 0, // bool both_dir = false, scalar_t max_r2 = 0) cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in search_intersect_cuda: %s\n", cudaGetErrorString(err)); } ================================================ FILE: extensions/mesh_grid/mesh_grid_searcher.py ================================================ import torch import trimesh from mesh_grid import (cumsum, insert_grid_surface, search_inside_mesh, search_intersect, search_nearest_point) class MeshGridSearcher: def __init__(self, verts=None, faces=None): if verts is not None and faces is not None: self.set_mesh(verts, faces) def set_mesh(self, verts, faces): self.verts = verts self.faces = faces _min, _ = torch.min(verts, 0) _max, _ = torch.max(verts, 0) self.step = (torch.cumprod(_max - _min, 0)[-1] / len(verts))**(1. / 3.) l = _max - _min c = (_max + _min) / 2 l = torch.max(torch.floor(l / self.step), torch.zeros_like(l)) + 1 _min_step = c - self.step * l / 2 self.num = torch.cat([l, torch.cumprod(l, 0)[-1:]]).int() self.minmax = torch.cat([_min_step, _max]) self.tri_num = torch.zeros(self.num[-1], dtype=torch.int32).to(verts.device) self.tri_idx = insert_grid_surface(self.verts, self.faces, self.minmax, self.num, self.step, self.tri_num) def nearest_points(self, points): points = points.to(self.verts.device) nearest_faces = torch.zeros(points.shape[-2], dtype=torch.int32).to(self.verts.device) coeff = torch.zeros(points.shape, dtype=torch.float32).to(self.verts.device) nearest_pts = torch.zeros_like(coeff) search_nearest_point(points, self.verts, self.faces, self.tri_num, self.tri_idx, self.num, self.minmax, self.step, nearest_faces, nearest_pts, coeff) return nearest_pts, nearest_faces def inside_mesh(self, points): points = points.to(self.verts.device) inside = torch.zeros(points.shape[-2], dtype=torch.float32).to(self.verts.device) search_inside_mesh(points, self.verts, self.faces, self.tri_num, self.tri_idx, self.num, self.minmax, 
self.step, inside) return inside def intersects_any(self, origins, directions): origins = origins.to(self.verts.device) directions = directions.to(self.verts.device) intersect = torch.zeros(origins.shape[-2], dtype=torch.bool).to(self.verts.device) search_intersect(origins, directions, self.verts, self.faces, self.tri_num, self.tri_idx, self.num, self.minmax, self.step, intersect) return intersect ================================================ FILE: extensions/mesh_grid/render.cpp ================================================ #include #include #include #include #include #ifdef USE_CUDA template index zbuffer_forward(index,index,index,index,const scalar*,const index*, scalar*,vector*,index*,scalar*,bool*,bool,scalar); template bool zbuffer_forward_gpu(index,index,index,index,const scalar*,const index*, index*,scalar*, bool*,bool,scalar); #else #include "render.h" #endif #include using namespace torch; template index zbuffer_forward_cpu(index h, index w, index n, index f, const scalar *v, const index *tri, index *ind, scalar *coeff, bool *vis, bool persp, scalar eps) { scalar *zbuf = (scalar*)malloc(sizeof(scalar)*h*w); std::vector > ibuf(h*w); for(index i = 0; i < h*w; ++i) { zbuf[i] = std::numeric_limits::max(); ibuf[i].clear(); } index r = zbuffer_forward > (h, w, n, f, v, tri, zbuf, ibuf.data(), ind, coeff, vis, persp, eps); free(zbuf); return r; } std::vector render_forward(Tensor verts, Tensor tri, uint64_t h, uint64_t w, bool persp, double eps = 1e-6) { uint64_t n = verts.size(0), f = tri.size(0); bool cuda = verts.type().is_cuda(); Tensor index =-torch::ones({(int64_t)h,(int64_t)w}, cuda ? CUDA(kLong) : CPU(kLong)), visual= torch::ones({(int64_t)n}, cuda ? CUDA(kBool) : CPU(kBool)), coeff; switch(verts.type().scalarType()) { case torch::ScalarType::Float: coeff = torch::zeros({(int64_t)h,(int64_t)w,3}, cuda ? 
CUDA(kFloat) : CPU(kFloat)); if(cuda) { #ifdef USE_CUDA zbuffer_forward_gpu( (int64_t)h,(int64_t)w,(int64_t)n,(int64_t)f, verts.data(), tri.data(), index.data(),coeff.data(),visual.data(), persp, (float)eps); #endif } else { zbuffer_forward_cpu( (int64_t)h,(int64_t)w,(int64_t)n,(int64_t)f, verts.data(), tri.data(), index.data(),coeff.data(),visual.data(), persp, (float)eps); } break; default: break;} return {index, coeff, visual}; } PYBIND11_MODULE(_render, m) { m.def("forward", &render_forward); } ================================================ FILE: extensions/mesh_grid/render.cu ================================================ #ifndef USE_CUDA #define USE_CUDA #endif #include #include #include "render.h" template static inline __device__ __host__ scalar numeric_max() { if((scalar)-1 > 0) return (scalar)-1; bool is_float = ((scalar)1.1 != (scalar)1); switch(sizeof(scalar)) { case 8: if(is_float) { return (scalar)1.7976931348623157879e308; } else return (scalar)9223372036854775807; case 4: if(is_float) { return (scalar)3.40282346638528875558e38f; } else return (scalar)2147483647; case 2: return (scalar)32767; default:return (scalar)127;} } template class vector_gpu { public: __device__ vector_gpu(uint64_t n = 0): ptr(NULL), len(n), mutex(0) { if(n > 0) { n = allocate(len); ptr = (T*)malloc(sizeof(T) * n); if(ptr == NULL) len = 0; } } __device__ ~vector_gpu() { if(ptr != NULL) free(ptr); } __device__ uint64_t size() const {return len;} __device__ T &operator[](uint64_t i) const { return ptr[i % len]; } __device__ void clear() { while(ptr != NULL) if(atomicCAS(&mutex, 0, 1) == 0) { free(ptr); len = 0; ptr = NULL; atomicExch(&mutex, 0); } } __device__ bool push_back(T p) { bool inserted = true; bool blocked = true; while(blocked) if(atomicCAS(&mutex, 0, 1) == 0) { if(len % bufsize == 0) { T*tmp = (T*)malloc(sizeof(T) *(len+bufsize)); if(inserted = (tmp != NULL)) { for(uint64_t i = 0; i < len; ++i) tmp[i] = ptr[i]; free(ptr); ptr = tmp; } } if(inserted) ptr[len++] = 
p; atomicExch(&mutex, 0); blocked = false; } return inserted; } protected: inline __device__ uint64_t allocate(uint64_t n) { return ((n + bufsize - 1) % bufsize) * bufsize; } mutable T*ptr; uint64_t len; int mutex; }; template __global__ void zbuffer_forward_kernel(index h,index w,index n,index f, const scalar *v, const index *tri, scalar *zbuf, vector_gpu *ibuf, index *i, scalar *coeff, bool *vis, bool persp, scalar eps) { index st = 0, ed = h*w; for(index i = st; i < ed; ++i) zbuf[i] = numeric_max(); zbuffer_forward >( h, w, n, f, v, tri, zbuf, ibuf, i, coeff, vis, persp, eps); } #include template bool zbuffer_forward_gpu(index h, index w, index n, index f, const scalar *v, const index *tri, index *ind, scalar *coeff, bool *vis, bool persp, scalar eps) { vector_gpu *ibuf = NULL; scalar *zbuf = NULL; cudaMalloc((void**)&ibuf, sizeof(vector_gpu) * h * w); if(ibuf == NULL) return false; cudaMemset(ibuf, 0, sizeof(vector_gpu) * h * w); cudaMalloc((void**)&zbuf, sizeof(scalar) * h * w); if(zbuf == NULL) {cudaFree(ibuf); return false;} index threads = 512; zbuffer_forward_kernel<<<1,threads>>>(h, w, n, f, v, tri, zbuf, ibuf, ind, coeff, vis, persp, eps); cudaError_t e = cudaGetLastError(); if(e != cudaSuccess) std::cout << cudaGetErrorString(e) << std::endl; cudaFree(zbuf); cudaFree(ibuf); return e == cudaSuccess; } #include #define IMPLEMENT(scalar) \ template int64_t zbuffer_forward >( \ int64_t,int64_t,int64_t,int64_t,const scalar*,const int64_t*,scalar*, \ std::vector*,int64_t*,scalar*,bool*,bool,scalar); \ template bool zbuffer_forward_gpu(int64_t,int64_t,int64_t,int64_t, \ const scalar*,const int64_t*,int64_t*,scalar*, bool*,bool,scalar); IMPLEMENT(float) ================================================ FILE: extensions/mesh_grid/render.h ================================================ #ifndef _RENDER_H_ #define _RENDER_H_ #ifndef __device__ #define __device__ #endif #ifndef __host__ #define __host__ #endif #include #include #include #ifdef USE_CUDA static 
__device__ float atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } #endif template inline __device__ bool split_for_loop(index &st, index &ed, index stride = 1) { #ifdef __CUDA_ARCH__ index num = gridDim.x * blockDim.x; num = (ed + num * stride - 1 - st) / (num * stride); st = st + (blockIdx.x*blockDim.x + threadIdx.x) * num * stride; ed = st + num * stride < ed ? st + num * stride : ed; #endif return st < ed; } template __device__ __host__ unsigned char process_one_tri(const scalar v[9], index w, index h, index bbox[4], scalar Ainv[9], scalar eps, bool double_face = false) { scalar umin = (scalar)w, vmin = (scalar)h, umax = 0, vmax = 0; if(v != NULL) for(unsigned char i = 0; i < 3; ++i) if(i == 0) { umax = umin = v[3*i]; vmax = vmin = v[3*i+1]; } else { if(umin > v[3*i]) umin = v[3*i]; else if(umax < v[3*i]) umax = v[3*i]; if(vmin > v[3*i+1]) vmin = v[3*i+1]; else if(vmax = w ? w-1: umax); bbox[2] = (index)(vmin < 0 ? 0 : vmin); bbox[3] = (index)(vmax >= h ? h-1: vmax); if(bbox[1] < bbox[0] || bbox[3] < bbox[2]) return false; } if(Ainv == NULL) return false; unsigned char type = 0; Ainv[6] = v[3]*v[7]-v[6]*v[4]; Ainv[7] = v[6]*v[1]-v[0]*v[7]; Ainv[8] = v[0]*v[4]-v[3]*v[1]; scalar det = Ainv[6] + Ainv[7] + Ainv[8]; if(!double_face && det > eps) return false; Ainv[0] = v[4]-v[7]; Ainv[1] = v[7]-v[1]; Ainv[2] = v[1]-v[4]; Ainv[3] = v[6]-v[3]; Ainv[4] = v[0]-v[6]; Ainv[5] = v[3]-v[0]; if(det <= eps && det >= -eps) { scalar l2[] = { Ainv[0]*Ainv[0]+Ainv[3]*Ainv[3], Ainv[1]*Ainv[1]+Ainv[4]*Ainv[4], Ainv[2]*Ainv[2]+Ainv[5]*Ainv[5]}; unsigned char i = (l2[0] > l2[1] ? 0 : 1), j, k; i = (l2[i] > l2[2] ? 
i : 2); j = (i+1)%3; k = (j+1)%3; if(l2[i] > eps*eps) { type = (1< __device__ __host__ bool normalize_coeff(scalar c[3], const scalar uv[2], const scalar Ainv[9], unsigned char t, scalar eps) { unsigned char i = 0, j = 1, k = 2; switch(t) { case 7: c[0] = Ainv[0]*uv[0] + Ainv[3]*uv[1] + Ainv[6]; c[1] = Ainv[1]*uv[0] + Ainv[4]*uv[1] + Ainv[7]; c[2] = Ainv[2]*uv[0] + Ainv[5]*uv[1] + Ainv[8]; return (c[0] >= -eps && c[1] >= -eps && c[2] >= -eps); case 3: case 5: case 6: i = (7-t)/2; j = (i+1)%3; k = (j+1)%3; c[0] = Ainv[0]*uv[0] + Ainv[3]*uv[1] + Ainv[6]; c[1] = Ainv[1]*uv[0] + Ainv[4]*uv[1] + Ainv[7]; c[2] = Ainv[2]*uv[0] + Ainv[5]*uv[1] + Ainv[8]; if(c[i]*c[i] > eps*eps) return false; c[i] = 0; return (c[j] >= -eps && c[k] >= -eps); case 1: case 2: case 4: i = t/2; j = (i+1)%3; k = (j+1)%3; c[j] = (uv[0] - Ainv[0]); c[k] = (uv[1] - Ainv[1]); c[i] = (c[j]*c[j] + c[k]*c[k]); if(c[i] > eps*eps) return false; c[j] = c[k] = 0; c[i] = 1; return true; default:return false;} } template __device__ __host__ index zbuffer_forward(index h, index w, index n, index f, const scalar*v, const index *tri, scalar *zbuf, vector *ibuf, index *ind, scalar*coeff, bool*vis, bool persp, scalar eps) { index st = 0, ed = n, count = 0; #ifdef __CUDA_ARCH__ split_for_loop(st, ed); #endif for(index i = st; i < ed; ++i) { scalar x = v[3*i], y = v[3*i+1]; if(persp) { if(v[3*i+2] <= eps) { vis[i] = false; continue; } else { x /= v[3*i+2]; y /= v[3*i+2]; } } x = floor(x); y = floor(y); if(x < 0 || y < 0 || x >= (scalar)w || y >= (scalar)h) { vis[i] = false; continue; } else { index j = (index)x + (index)y * w; vis[i] = true; ibuf[j].push_back(i); } } st = 0; ed = f; #ifdef __CUDA_ARCH__ __syncthreads(); split_for_loop(st, ed); #endif scalar Ainv[9], c[3], uv[2], z; index bbox[4]; unsigned char t = 0; for(index i = st; i < ed; ++i) { if((v[3*tri[3*i] +2] <= eps || v[3*tri[3*i+1]+2] <= eps || v[3*tri[3*i+2]+2] <= eps) && persp) continue; scalar v_[] = { v[3*tri[3*i]], v[3*tri[3*i]+1], v[3*tri[3*i] 
+2], v[3*tri[3*i+1]],v[3*tri[3*i+1]+1],v[3*tri[3*i+1]+2], v[3*tri[3*i+2]],v[3*tri[3*i+2]+1],v[3*tri[3*i+2]+2]}; if(persp) for(unsigned char j = 0; j < 3; ++j) { v_[3*j] /= v_[3*j+2]; v_[3*j+1]/= v_[3*j+2]; } if((t = process_one_tri(v_, w, h, bbox, Ainv, eps))) for(index y = bbox[2]; y <= bbox[3]; ++y) for(index x = bbox[0]; x <= bbox[1]; ++x) { ++count; index j = x + y*w; uv[0] = (scalar)x; uv[1] = (scalar)y; if(normalize_coeff(c, uv, Ainv, t, eps)) { if(persp) { c[0] /= v_[2]; c[1] /= v_[5]; c[2] /= v_[8]; z = c[0] + c[1] + c[2]; if(z <= eps) continue; c[0] /= z; c[1] /= z; c[2] /= z; z = 1./ z; } else z = c[0]*v_[2] + c[2]*v_[5] + c[2]*v_[8]; #ifdef __CUDA_ARCH__ if(atomicMin(zbuf + j, z) > z) #else if(zbuf[j] > z) #endif { zbuf[j] = z; ind[j] = i; coeff[3*j] = c[0]; coeff[3*j+1]= c[1]; coeff[3*j+2]= c[2]; } } for(index k = 0; k < ibuf[j].size(); ++k) { if(ibuf[j][k] == tri[3*i] || ibuf[j][k] == tri[3*i+1] || ibuf[j][k] == tri[3*i+2]) continue; uv[0] = v[3*ibuf[j][k]]; uv[1] = v[3*ibuf[j][k]+1]; if(persp) { uv[0] /= v[3*ibuf[j][k]+2]; uv[1] /= v[3*ibuf[j][k]+2]; } if(normalize_coeff(c, uv, Ainv, t, eps)) { if(persp) { c[0] /= v_[2]; c[1] /= v_[5]; c[2] /= v_[8]; z = c[0] + c[1] + c[2]; if(z <= eps) continue; c[0] /= z; c[1] /= z; c[2] /= z; z = 1./ z; } else z = c[0]*v_[2] + c[2]*v_[5] + c[2]*v_[8]; if(z <= v[3*ibuf[j][k]+2]) vis[ibuf[j][k]] = false; } } } } st = 0; ed = h*w; #ifdef __CUDA_ARCH__ __syncthreads(); split_for_loop(st, ed); #endif for(index i = st; i < ed; ++i) ibuf[i].clear(); #ifdef __CUDA_ARCH__ __syncthreads(); #endif return count; } #endif ================================================ FILE: extensions/mesh_grid/setup.py ================================================ import unittest from setuptools import find_packages, setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension CUDA_FLAGS = [] INSTALL_REQUIREMENTS = [] ext_modules = [ CUDAExtension('mesh_grid', [ 'mesh_grid.cpp', 'mesh_grid_kernel.cu', ]), ] 
setup(ext_modules=ext_modules, cmdclass={'build_ext': BuildExtension}) ================================================ FILE: extensions/mesh_grid/surface_inside.cpp ================================================ #define USE_CUDA #ifdef USE_CUDA #include #include template extern scalar surface_inside_integral(unsigned char,index, const scalar*,const index*,const scalar*,scalar*,scalar=1e-6); template extern bool surface_inside_gpu(index,index,index,char*, const scalar*,const scalar*,const index*,scalar=1e-6, const scalar* =NULL,const index* =NULL,const index* =NULL,const index* =NULL); template extern scalar surface_inside_grid(unsigned char,index,const scalar*, const index*,const scalar*,scalar*,const scalar*,const index*, const index*,const index*,index = 256); #else #include "surface_inside.h" #endif #include "torch_util.h" template index surface_inside_cpu(index n, index d, index m, char *inside, const scalar *points, const scalar *v, const index *tri, scalar eps = 1e-6, const scalar *_min_step = NULL, const index *size = NULL, const index *tri_num = NULL, const index *tri_idx = NULL) { bool has_grid =(_min_step != NULL && size != NULL && tri_num != NULL && tri_idx != NULL); index num = 0; eps = (eps < 0 ? -eps : eps); scalar *patch = (scalar*)malloc(sizeof(scalar) * d * d); if(patch == NULL) return 0; if(has_grid) { for(index i = 0; i < n; ++i) { scalar r = surface_inside_grid( d, m, v, tri, points + d*i, patch, _min_step, size, tri_num, tri_idx); if(inside != NULL) { if((r - floor(r)) <= eps) { inside[i] = ((index)floor(r < 0 ? -r : r) % 2); num += inside[i]; } else inside[i] = -1; // on the boundary } } } else for(index i = 0; i < n; ++i) { scalar r = surface_inside_integral( d, m, v, tri, points + d*i, patch, eps); if(inside != NULL) { if((r - floor(r)) <= eps) { inside[i] = ((index)floor(r < 0 ? 
-r : r) % 2); num += inside[i]; } else inside[i] = -1; // on the boundary } } free(patch); return num; } using namespace std; using namespace torch; torch::Tensor surface_inside(torch::Tensor points, torch::Tensor vertices, torch::Tensor tri, torch::Tensor params, torch::Tensor tri_num, torch::Tensor tri_idx, double eps = 1e-6) { int64_t n = get_size(points, 0), d = get_size(points, 1), m = get_size(tri, 0); bool isCuda = points.type().is_cuda(), has_grid = false; vector sz = {n, d}; CHECK_SIZE(points, sz); sz[0] = get_size(vertices, 0); CHECK_SIZE(vertices, sz); CHECK_TYPE(points, vertices); sz[0] = m; CHECK_SIZE(tri, sz); CHECK_TYPE(tri, tri_num); sz = get_size(params); if(sz.size() == 1 && sz[0] == d + 1) { CHECK_TYPE(params, points); sz = get_size(tri_num); if(sz.size() == d) { vector s = get_size(tri_idx); if(s.size() == 1) { CHECK_TYPE(tri_num, tri_idx); has_grid = true; sz.push_back(1); for(unsigned char i = 0; i < d; ++i) sz[d] *= sz[i]; } } } Tensor inside = torch::zeros({n}, NEW_TYPE(kChar,isCuda)); char *inside_ = (char*)inside.data_ptr(); switch(TYPE(points)) { case ScalarType::Float: if(isCuda) { #ifdef USE_CUDA surface_inside_gpu(n, d, m, inside_, points.data(), vertices.data(), tri.data(), (float)eps, has_grid ? params.data() : NULL, has_grid ? sz.data() : NULL, has_grid ? tri_num.data() : NULL, has_grid ? tri_idx.data() : NULL); #endif } else { surface_inside_cpu(n, d, m, inside_, points.data(), vertices.data(), tri.data(), (float)eps, has_grid ? params.data() : NULL, has_grid ? sz.data() : NULL, has_grid ? tri_num.data() : NULL, has_grid ? tri_idx.data() : NULL); } break; case ScalarType::Double: if(isCuda) { #ifdef USE_CUDA surface_inside_gpu(n, d, m, inside_, points.data(), vertices.data(), tri.data(), eps, has_grid ? params.data() : NULL, has_grid ? sz.data() : NULL, has_grid ? tri_num.data() : NULL, has_grid ? tri_idx.data() : NULL); #endif } else { surface_inside_cpu(n, d, m, inside_, points.data(), vertices.data(), tri.data(), eps, has_grid ? 
params.data() : NULL, has_grid ? sz.data() : NULL, has_grid ? tri_num.data() : NULL, has_grid ? tri_idx.data() : NULL); } break; default: CHECK_FLOAT(points);} return inside; } PYBIND11_MODULE(surface_inside, m) { m.def("forward", &surface_inside, "Point Inside Surface"); } ================================================ FILE: extensions/mesh_grid/test_mesh_grid.py ================================================ import os import numpy as np import torch import trimesh from mesh_grid_searcher import MeshGridSearcher torch.set_default_tensor_type('torch.cuda.FloatTensor') data_dir = '../../data/human2/SMPL' subjects = os.listdir(data_dir) for subject in subjects: mesh_path = os.path.join(data_dir, subject, f'smplx.obj') mesh = trimesh.load(mesh_path) verts = torch.Tensor(mesh.vertices) faces = torch.Tensor(mesh.faces).int() mygrid = MeshGridSearcher(verts, faces) B_MAX = mesh.vertices.max(0) B_MIN = mesh.vertices.min(0) length = B_MAX - B_MIN points = torch.Tensor(np.random.rand(10, 3) * length + B_MIN) nearest_pts, _ = mygrid.nearest_points(points) inside = mygrid.inside_mesh(points) inside_trimesh = mesh.contains(points.cpu().numpy()) sdf = (torch.norm(nearest_pts - points, dim=1) * inside.float()).cpu().numpy() sdf_trimesh = trimesh.proximity.signed_distance(mesh, points.cpu().numpy()) inside = (inside.cpu().numpy() + 1) / 2 inside_error = np.abs(inside - inside_trimesh).sum() dist_error = np.abs(sdf - sdf_trimesh).sum() print('[', subject, '] inside_error: ', inside_error, ' dist_error: ', dist_error) print('scale: ', length.max()) print(np.abs(sdf - sdf_trimesh)) ================================================ FILE: extensions/ngp_raymarch/README.md ================================================ # ngp_raymarch ## Install build and install cuda-extension,to support instant-ngp ``` cd extensions/ngp_raymarch rm -rf build && clear && python setup.py build_ext --inplace \ 2>&1 | tee build.log python setup.py install ``` ## Notice * This code mainly based on 
[instant-ngp](https://github.com/NVlabs/instant-ngp) code modification * This code's license belongs to [instant-ngp](https://github.com/NVlabs/instant-ngp/blob/master/LICENSE.txt) * If you found this code useful, please cite [instant-ngp](https://github.com/NVlabs/instant-ngp#license-and-citation) * We appreciate [instant-ngp](https://github.com/NVlabs/instant-ngp) for their cool code implementation ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.gitignore ================================================ qrc_*cxx *.orig *.pyc *.diff diff *.save save *.old *.gmo *.qm core core.* *.bak *~ *build* *.moc.* *.moc ui_* CMakeCache.txt tags .*.swp activity.png *.out *.php* *.log *.orig *.rej log patch *.patch a a.* lapack/testing lapack/reference .*project .settings Makefile !ci/build.gitlab-ci.yml ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.gitlab/issue_templates/Bug Report.md ================================================ ### Summary ### Environment - **Operating System** : Windows/Linux - **Architecture** : x64/Arm64/PowerPC ... - **Eigen Version** : 3.3.9 - **Compiler Version** : Gcc7.0 - **Compile Flags** : -O3 -march=native - **Vector Extension** : SSE/AVX/NEON ... ### Minimal Example ```cpp //show your code here ``` ### Steps to reproduce 1. first step 2. second step 3. ... ### What is the current *bug* behavior? ### What is the expected *correct* behavior? ### Relevant logs ### Warning Messages ### Benchmark scripts and results ### Anything else that might help - [ ] Have a plan to fix this issue. ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.gitlab/issue_templates/Feature Request.md ================================================ ### Describe the feature you would like to be implemented. ### Would such a feature be useful for other users? Why?
### Any hints on how to implement the requested feature? ### Additional resources ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.gitlab/merge_request_templates/Merge Request Template.md ================================================ ### Reference issue ### What does this implement/fix? ### Additional information ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.gitlab-ci.yml ================================================ # This file is part of Eigen, a lightweight C++ template library # for linear algebra. # # Copyright (C) 2020 Arm Ltd. and Contributors # # This Source Code Form is subject to the terms of the Mozilla # Public License v. 2.0. If a copy of the MPL was not distributed # with this file, You can obtain one at http://mozilla.org/MPL/2.0/. stages: - buildsmoketests - smoketests - build - test variables: BUILDDIR: builddir EIGEN_CI_CMAKE_GENEATOR: "Ninja" include: - "/ci/smoketests.gitlab-ci.yml" - "/ci/build.gitlab-ci.yml" - "/ci/test.gitlab-ci.yml" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/.hgeol ================================================ [patterns] *.sh = LF *.MINPACK = CRLF scripts/*.in = LF debug/msvc/*.dat = CRLF debug/msvc/*.natvis = CRLF unsupported/test/mpreal/*.* = CRLF ** = native [repository] native = LF ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.APACHE ================================================ /* Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.BSD ================================================ /* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.GPL ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.LGPL ================================================ GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. 
You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. 
Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. 
In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. 
In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". 
Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. 
You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. 
e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. 
However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. 
If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the library's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. <signature of Ty Coon>, 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.MINPACK ================================================ Minpack Copyright Notice (1999) University of Chicago. All rights reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: "This product includes software developed by the University of Chicago, as Operator of Argonne National Laboratory. Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear. 4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED. 5. LIMITATION OF LIABILITY. 
IN NO EVENT WILL THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE POSSIBILITY OF SUCH LOSS OR DAMAGES. ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.MPL2 ================================================ Mozilla Public License Version 2.0 ================================== 1. Definitions -------------- 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. 
"Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions -------------------------------- 2.1. 
Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. 
Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities ------------------- 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. 
Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination -------------- 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ************************************************************************ * * * 6. 
Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. * * * ************************************************************************ ************************************************************************ * * * 7. Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. 
* * * ************************************************************************ 8. Litigation ------------- Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous ---------------- This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License --------------------------- 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice ------------------------------------------- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/COPYING.README ================================================ Eigen is primarily MPL2 licensed. See COPYING.MPL2 and these links: http://www.mozilla.org/MPL/2.0/ http://www.mozilla.org/MPL/2.0/FAQ.html Some files contain third-party code under BSD or LGPL licenses, whence the other COPYING.* files here. All the LGPL code is either LGPL 2.1-only, or LGPL 2.1-or-later. For this reason, the COPYING.LGPL file contains the LGPL 2.1 text. 
If you want to guarantee that the Eigen code that you are #including is licensed under the MPL2 and possibly more permissive licenses (like BSD), #define this preprocessor symbol: EIGEN_MPL2_ONLY For example, with most compilers, you could add this to your project CXXFLAGS: -DEIGEN_MPL2_ONLY This will cause a compilation error to be generated if you #include any code that is LGPL licensed. ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/CTestConfig.cmake ================================================ ## This file should be placed in the root directory of your project. ## Then modify the CMakeLists.txt file in the root directory of your ## project to incorporate the testing dashboard. ## # The following are required to uses Dart and the Cdash dashboard ## enable_testing() ## include(CTest) set(CTEST_PROJECT_NAME "Eigen") set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC") set(CTEST_DROP_METHOD "http") set(CTEST_DROP_SITE "my.cdash.org") set(CTEST_DROP_LOCATION "/submit.php?project=Eigen") set(CTEST_DROP_SITE_CDASH TRUE) #set(CTEST_PROJECT_SUBPROJECTS #Official #Unsupported #) ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/CTestCustom.cmake.in ================================================ set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "2000") set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "2000") list(APPEND CTEST_CUSTOM_ERROR_EXCEPTION @EIGEN_CTEST_ERROR_EXCEPTION@) ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Cholesky ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_CHOLESKY_MODULE_H #define EIGEN_CHOLESKY_MODULE_H #include "Core" #include "Jacobi" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Cholesky_Module Cholesky module * * * * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices. * Those decompositions are also accessible via the following methods: * - MatrixBase::llt() * - MatrixBase::ldlt() * - SelfAdjointView::llt() * - SelfAdjointView::ldlt() * * \code * #include * \endcode */ #include "src/Cholesky/LLT.h" #include "src/Cholesky/LDLT.h" #ifdef EIGEN_USE_LAPACKE #ifdef EIGEN_USE_MKL #include "mkl_lapacke.h" #else #include "src/misc/lapacke.h" #endif #include "src/Cholesky/LLT_LAPACKE.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CHOLESKY_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/CholmodSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CHOLMODSUPPORT_MODULE_H #define EIGEN_CHOLMODSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" extern "C" { #include } /** \ingroup Support_modules * \defgroup CholmodSupport_Module CholmodSupport module * * This module provides an interface to the Cholmod library which is part of the suitesparse package. * It provides the two following main factorization classes: * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization. * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial). 
* * For the sake of completeness, this module also propose the two following classes: * - class CholmodSimplicialLLT * - class CholmodSimplicialLDLT * Note that these classes does not bring any particular advantage compared to the built-in * SimplicialLLT and SimplicialLDLT factorization classes. * * \code * #include * \endcode * * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies. * The dependencies depend on how cholmod has been compiled. * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task. * */ #include "src/CholmodSupport/CholmodSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CHOLMODSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Core ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2007-2011 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CORE_MODULE_H #define EIGEN_CORE_MODULE_H // first thing Eigen does: stop the compiler from reporting useless warnings. #include "src/Core/util/DisableStupidWarnings.h" // then include this file where all our macros are defined. It's really important to do it first because // it's where we do all the compiler/OS/arch detections and define most defaults. #include "src/Core/util/Macros.h" // This detects SSE/AVX/NEON/etc. 
and configure alignment settings #include "src/Core/util/ConfigureVectorization.h" // We need cuda_runtime.h/hip_runtime.h to ensure that // the EIGEN_USING_STD macro works properly on the device side #if defined(EIGEN_CUDACC) #include #elif defined(EIGEN_HIPCC) #include #endif #ifdef EIGEN_EXCEPTIONS #include #endif // Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3) // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details. #if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) && EIGEN_GNUC_AT_MOST(5,5) #pragma GCC optimize ("-fno-ipa-cp-clone") #endif // Prevent ICC from specializing std::complex operators that silently fail // on device. This allows us to use our own device-compatible specializations // instead. #if defined(EIGEN_COMP_ICC) && defined(EIGEN_GPU_COMPILE_PHASE) \ && !defined(_OVERRIDE_COMPLEX_SPECIALIZATION_) #define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1 #endif #include // this include file manages BLAS and MKL related macros // and inclusion of their respective header files #include "src/Core/util/MKL_support.h" #if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16) #define EIGEN_HAS_GPU_FP16 #endif #if defined(EIGEN_HAS_CUDA_BF16) || defined(EIGEN_HAS_HIP_BF16) #define EIGEN_HAS_GPU_BF16 #endif #if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE) #define EIGEN_HAS_OPENMP #endif #ifdef EIGEN_HAS_OPENMP #include #endif // MSVC for windows mobile does not have the errno.h file #if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM #define EIGEN_HAS_ERRNO #endif #ifdef EIGEN_HAS_ERRNO #include #endif #include #include #include #include #include #include #ifndef EIGEN_NO_IO #include #endif #include #include #include #include // for CHAR_BIT // for min/max: #include #if EIGEN_HAS_CXX11 #include #endif // for std::is_nothrow_move_assignable #ifdef EIGEN_INCLUDE_TYPE_TRAITS #include #endif // for outputting debug info #ifdef EIGEN_DEBUG_ASSIGN #include #endif // required for 
__cpuid, needs to be included after cmath // also required for _BitScanReverse on Windows on ARM #if EIGEN_COMP_MSVC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM64) && !EIGEN_OS_WINCE #include #endif #if defined(EIGEN_USE_SYCL) #undef min #undef max #undef isnan #undef isinf #undef isfinite #include #include #include #include #include #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM0 #define EIGEN_SYCL_LOCAL_THREAD_DIM0 16 #endif #ifndef EIGEN_SYCL_LOCAL_THREAD_DIM1 #define EIGEN_SYCL_LOCAL_THREAD_DIM1 16 #endif #endif #if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT // This will generate an error message: #error Eigen2-support is only available up to version 3.2. Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information #endif namespace Eigen { // we use size_t frequently and we'll never remember to prepend it with std:: every time just to // ensure QNX/QCC support using std::size_t; // gcc 4.6.0 wants std:: for ptrdiff_t using std::ptrdiff_t; } /** \defgroup Core_Module Core module * This is the main module of Eigen providing dense matrix and vector support * (both fixed and dynamic size) with all the features corresponding to a BLAS library * and much more... 
* * \code * #include * \endcode */ #include "src/Core/util/Constants.h" #include "src/Core/util/Meta.h" #include "src/Core/util/ForwardDeclarations.h" #include "src/Core/util/StaticAssert.h" #include "src/Core/util/XprHelper.h" #include "src/Core/util/Memory.h" #include "src/Core/util/IntegralConstant.h" #include "src/Core/util/Serializer.h" #include "src/Core/util/SymbolicIndex.h" #include "src/Core/NumTraits.h" #include "src/Core/MathFunctions.h" #include "src/Core/GenericPacketMath.h" #include "src/Core/MathFunctionsImpl.h" #include "src/Core/arch/Default/ConjHelper.h" // Generic half float support #include "src/Core/arch/Default/Half.h" #include "src/Core/arch/Default/BFloat16.h" #include "src/Core/arch/Default/TypeCasting.h" #include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h" #if defined EIGEN_VECTORIZE_AVX512 #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/TypeCasting.h" #include "src/Core/arch/SSE/Complex.h" #include "src/Core/arch/AVX/PacketMath.h" #include "src/Core/arch/AVX/TypeCasting.h" #include "src/Core/arch/AVX/Complex.h" #include "src/Core/arch/AVX512/PacketMath.h" #include "src/Core/arch/AVX512/TypeCasting.h" #include "src/Core/arch/AVX512/Complex.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/AVX/MathFunctions.h" #include "src/Core/arch/AVX512/MathFunctions.h" #elif defined EIGEN_VECTORIZE_AVX // Use AVX for floats and doubles, SSE for integers #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/TypeCasting.h" #include "src/Core/arch/SSE/Complex.h" #include "src/Core/arch/AVX/PacketMath.h" #include "src/Core/arch/AVX/TypeCasting.h" #include "src/Core/arch/AVX/Complex.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/AVX/MathFunctions.h" #elif defined EIGEN_VECTORIZE_SSE #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/TypeCasting.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/SSE/Complex.h" #elif 
defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) #include "src/Core/arch/AltiVec/PacketMath.h" #include "src/Core/arch/AltiVec/MathFunctions.h" #include "src/Core/arch/AltiVec/Complex.h" #elif defined EIGEN_VECTORIZE_NEON #include "src/Core/arch/NEON/PacketMath.h" #include "src/Core/arch/NEON/TypeCasting.h" #include "src/Core/arch/NEON/MathFunctions.h" #include "src/Core/arch/NEON/Complex.h" #elif defined EIGEN_VECTORIZE_SVE #include "src/Core/arch/SVE/PacketMath.h" #include "src/Core/arch/SVE/TypeCasting.h" #include "src/Core/arch/SVE/MathFunctions.h" #elif defined EIGEN_VECTORIZE_ZVECTOR #include "src/Core/arch/ZVector/PacketMath.h" #include "src/Core/arch/ZVector/MathFunctions.h" #include "src/Core/arch/ZVector/Complex.h" #elif defined EIGEN_VECTORIZE_MSA #include "src/Core/arch/MSA/PacketMath.h" #include "src/Core/arch/MSA/MathFunctions.h" #include "src/Core/arch/MSA/Complex.h" #endif #if defined EIGEN_VECTORIZE_GPU #include "src/Core/arch/GPU/PacketMath.h" #include "src/Core/arch/GPU/MathFunctions.h" #include "src/Core/arch/GPU/TypeCasting.h" #endif #if defined(EIGEN_USE_SYCL) #include "src/Core/arch/SYCL/SyclMemoryModel.h" #include "src/Core/arch/SYCL/InteropHeaders.h" #if !defined(EIGEN_DONT_VECTORIZE_SYCL) #include "src/Core/arch/SYCL/PacketMath.h" #include "src/Core/arch/SYCL/MathFunctions.h" #include "src/Core/arch/SYCL/TypeCasting.h" #endif #endif #include "src/Core/arch/Default/Settings.h" // This file provides generic implementations valid for scalar as well #include "src/Core/arch/Default/GenericPacketMathFunctions.h" #include "src/Core/functors/TernaryFunctors.h" #include "src/Core/functors/BinaryFunctors.h" #include "src/Core/functors/UnaryFunctors.h" #include "src/Core/functors/NullaryFunctors.h" #include "src/Core/functors/StlFunctors.h" #include "src/Core/functors/AssignmentFunctors.h" // Specialized functors for GPU. 
#ifdef EIGEN_GPUCC #include "src/Core/arch/GPU/Complex.h" #endif // Specializations of vectorized activation functions for NEON. #ifdef EIGEN_VECTORIZE_NEON #include "src/Core/arch/NEON/UnaryFunctors.h" #endif #include "src/Core/util/IndexedViewHelper.h" #include "src/Core/util/ReshapedHelper.h" #include "src/Core/ArithmeticSequence.h" #ifndef EIGEN_NO_IO #include "src/Core/IO.h" #endif #include "src/Core/DenseCoeffsBase.h" #include "src/Core/DenseBase.h" #include "src/Core/MatrixBase.h" #include "src/Core/EigenBase.h" #include "src/Core/Product.h" #include "src/Core/CoreEvaluators.h" #include "src/Core/AssignEvaluator.h" #ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 // at least confirmed with Doxygen 1.5.5 and 1.5.6 #include "src/Core/Assign.h" #endif #include "src/Core/ArrayBase.h" #include "src/Core/util/BlasUtil.h" #include "src/Core/DenseStorage.h" #include "src/Core/NestByValue.h" // #include "src/Core/ForceAlignedAccess.h" #include "src/Core/ReturnByValue.h" #include "src/Core/NoAlias.h" #include "src/Core/PlainObjectBase.h" #include "src/Core/Matrix.h" #include "src/Core/Array.h" #include "src/Core/CwiseTernaryOp.h" #include "src/Core/CwiseBinaryOp.h" #include "src/Core/CwiseUnaryOp.h" #include "src/Core/CwiseNullaryOp.h" #include "src/Core/CwiseUnaryView.h" #include "src/Core/SelfCwiseBinaryOp.h" #include "src/Core/Dot.h" #include "src/Core/StableNorm.h" #include "src/Core/Stride.h" #include "src/Core/MapBase.h" #include "src/Core/Map.h" #include "src/Core/Ref.h" #include "src/Core/Block.h" #include "src/Core/VectorBlock.h" #include "src/Core/IndexedView.h" #include "src/Core/Reshaped.h" #include "src/Core/Transpose.h" #include "src/Core/DiagonalMatrix.h" #include "src/Core/Diagonal.h" #include "src/Core/DiagonalProduct.h" #include "src/Core/Redux.h" #include "src/Core/Visitor.h" #include "src/Core/Fuzzy.h" #include "src/Core/Swap.h" #include "src/Core/CommaInitializer.h" #include "src/Core/GeneralProduct.h" 
#include "src/Core/Solve.h" #include "src/Core/Inverse.h" #include "src/Core/SolverBase.h" #include "src/Core/PermutationMatrix.h" #include "src/Core/Transpositions.h" #include "src/Core/TriangularMatrix.h" #include "src/Core/SelfAdjointView.h" #include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/Parallelizer.h" #include "src/Core/ProductEvaluators.h" #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" #include "src/Core/SolveTriangular.h" #include "src/Core/products/GeneralMatrixMatrixTriangular.h" #include "src/Core/products/SelfadjointMatrixVector.h" #include "src/Core/products/SelfadjointMatrixMatrix.h" #include "src/Core/products/SelfadjointProduct.h" #include "src/Core/products/SelfadjointRank2Update.h" #include "src/Core/products/TriangularMatrixVector.h" #include "src/Core/products/TriangularMatrixMatrix.h" #include "src/Core/products/TriangularSolverMatrix.h" #include "src/Core/products/TriangularSolverVector.h" #include "src/Core/BandMatrix.h" #include "src/Core/CoreIterators.h" #include "src/Core/ConditionEstimator.h" #if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) #include "src/Core/arch/AltiVec/MatrixProduct.h" #elif defined EIGEN_VECTORIZE_NEON #include "src/Core/arch/NEON/GeneralBlockPanelKernel.h" #endif #include "src/Core/BooleanRedux.h" #include "src/Core/Select.h" #include "src/Core/VectorwiseOp.h" #include "src/Core/PartialReduxEvaluator.h" #include "src/Core/Random.h" #include "src/Core/Replicate.h" #include "src/Core/Reverse.h" #include "src/Core/ArrayWrapper.h" #include "src/Core/StlIterators.h" #ifdef EIGEN_USE_BLAS #include "src/Core/products/GeneralMatrixMatrix_BLAS.h" #include "src/Core/products/GeneralMatrixVector_BLAS.h" #include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h" #include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h" #include "src/Core/products/SelfadjointMatrixVector_BLAS.h" #include 
"src/Core/products/TriangularMatrixMatrix_BLAS.h" #include "src/Core/products/TriangularMatrixVector_BLAS.h" #include "src/Core/products/TriangularSolverMatrix_BLAS.h" #endif // EIGEN_USE_BLAS #ifdef EIGEN_USE_MKL_VML #include "src/Core/Assign_MKL.h" #endif #include "src/Core/GlobalFunctions.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CORE_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Dense ================================================ #include "Core" #include "LU" #include "Cholesky" #include "QR" #include "SVD" #include "Geometry" #include "Eigenvalues" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Eigen ================================================ #include "Dense" #include "Sparse" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Eigenvalues ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EIGENVALUES_MODULE_H #define EIGEN_EIGENVALUES_MODULE_H #include "Core" #include "Cholesky" #include "Jacobi" #include "Householder" #include "LU" #include "Geometry" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Eigenvalues_Module Eigenvalues module * * * * This module mainly provides various eigenvalue solvers. 
* This module also provides some MatrixBase methods, including: * - MatrixBase::eigenvalues(), * - MatrixBase::operatorNorm() * * \code * #include * \endcode */ #include "src/misc/RealSvd2x2.h" #include "src/Eigenvalues/Tridiagonalization.h" #include "src/Eigenvalues/RealSchur.h" #include "src/Eigenvalues/EigenSolver.h" #include "src/Eigenvalues/SelfAdjointEigenSolver.h" #include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h" #include "src/Eigenvalues/HessenbergDecomposition.h" #include "src/Eigenvalues/ComplexSchur.h" #include "src/Eigenvalues/ComplexEigenSolver.h" #include "src/Eigenvalues/RealQZ.h" #include "src/Eigenvalues/GeneralizedEigenSolver.h" #include "src/Eigenvalues/MatrixBaseEigenvalues.h" #ifdef EIGEN_USE_LAPACKE #ifdef EIGEN_USE_MKL #include "mkl_lapacke.h" #else #include "src/misc/lapacke.h" #endif #include "src/Eigenvalues/RealSchur_LAPACKE.h" #include "src/Eigenvalues/ComplexSchur_LAPACKE.h" #include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_EIGENVALUES_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Geometry ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_GEOMETRY_MODULE_H #define EIGEN_GEOMETRY_MODULE_H #include "Core" #include "SVD" #include "LU" #include #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Geometry_Module Geometry module * * This module provides support for: * - fixed-size homogeneous transformations * - translation, scaling, 2D and 3D rotations * - \link Quaternion quaternions \endlink * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3) * - orthognal vector generation (\ref MatrixBase::unitOrthogonal) * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes \endlink * - \link AlignedBox axis aligned bounding boxes \endlink * - \link umeyama least-square transformation fitting \endlink * * \code * #include * \endcode */ #include "src/Geometry/OrthoMethods.h" #include "src/Geometry/EulerAngles.h" #include "src/Geometry/Homogeneous.h" #include "src/Geometry/RotationBase.h" #include "src/Geometry/Rotation2D.h" #include "src/Geometry/Quaternion.h" #include "src/Geometry/AngleAxis.h" #include "src/Geometry/Transform.h" #include "src/Geometry/Translation.h" #include "src/Geometry/Scaling.h" #include "src/Geometry/Hyperplane.h" #include "src/Geometry/ParametrizedLine.h" #include "src/Geometry/AlignedBox.h" #include "src/Geometry/Umeyama.h" // Use the SSE optimized version whenever possible. #if (defined EIGEN_VECTORIZE_SSE) || (defined EIGEN_VECTORIZE_NEON) #include "src/Geometry/arch/Geometry_SIMD.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_GEOMETRY_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Householder ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_MODULE_H #define EIGEN_HOUSEHOLDER_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Householder_Module Householder module * This module provides Householder transformations. * * \code * #include * \endcode */ #include "src/Householder/Householder.h" #include "src/Householder/HouseholderSequence.h" #include "src/Householder/BlockHouseholder.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_HOUSEHOLDER_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/IterativeLinearSolvers ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H #define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H #include "SparseCore" #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" /** * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module * * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse. * Those solvers are accessible via the following classes: * - ConjugateGradient for selfadjoint (hermitian) matrices, * - LeastSquaresConjugateGradient for rectangular least-square problems, * - BiCGSTAB for general square matrices. * * These iterative solvers are associated with some preconditioners: * - IdentityPreconditioner - not really useful * - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices. 
* - IncompleteLUT - incomplete LU factorization with dual thresholding * * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport. * \code #include \endcode */ #include "src/IterativeLinearSolvers/SolveWithGuess.h" #include "src/IterativeLinearSolvers/IterativeSolverBase.h" #include "src/IterativeLinearSolvers/BasicPreconditioners.h" #include "src/IterativeLinearSolvers/ConjugateGradient.h" #include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h" #include "src/IterativeLinearSolvers/BiCGSTAB.h" #include "src/IterativeLinearSolvers/IncompleteLUT.h" #include "src/IterativeLinearSolvers/IncompleteCholesky.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Jacobi ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_JACOBI_MODULE_H #define EIGEN_JACOBI_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Jacobi_Module Jacobi module * This module provides Jacobi and Givens rotations. * * \code * #include * \endcode * * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation: * - MatrixBase::applyOnTheLeft() * - MatrixBase::applyOnTheRight(). 
*/ #include "src/Jacobi/Jacobi.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_JACOBI_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/KLUSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_KLUSUPPORT_MODULE_H #define EIGEN_KLUSUPPORT_MODULE_H #include #include extern "C" { #include #include } /** \ingroup Support_modules * \defgroup KLUSupport_Module KLUSupport module * * This module provides an interface to the KLU library which is part of the suitesparse package. * It provides the following factorization class: * - class KLU: a sparse LU factorization, well-suited for circuit simulation. * * \code * #include * \endcode * * In order to use this module, the klu and btf headers must be accessible from the include paths, and your binary must be linked to the klu library and its dependencies. * The dependencies depend on how umfpack has been compiled. * For a cmake based project, you can use our FindKLU.cmake module to help you in this task. * */ #include "src/KLUSupport/KLUSupport.h" #include #endif // EIGEN_KLUSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/LU ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_LU_MODULE_H #define EIGEN_LU_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup LU_Module LU module * This module includes %LU decomposition and related notions such as matrix inversion and determinant. * This module defines the following MatrixBase methods: * - MatrixBase::inverse() * - MatrixBase::determinant() * * \code * #include * \endcode */ #include "src/misc/Kernel.h" #include "src/misc/Image.h" #include "src/LU/FullPivLU.h" #include "src/LU/PartialPivLU.h" #ifdef EIGEN_USE_LAPACKE #ifdef EIGEN_USE_MKL #include "mkl_lapacke.h" #else #include "src/misc/lapacke.h" #endif #include "src/LU/PartialPivLU_LAPACKE.h" #endif #include "src/LU/Determinant.h" #include "src/LU/InverseImpl.h" #if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_NEON #include "src/LU/arch/InverseSize4.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_LU_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/MetisSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_METISSUPPORT_MODULE_H #define EIGEN_METISSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" extern "C" { #include } /** \ingroup Support_modules * \defgroup MetisSupport_Module MetisSupport module * * \code * #include * \endcode * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis). * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. 
\endlink */ #include "src/MetisSupport/MetisSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_METISSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/OrderingMethods ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ORDERINGMETHODS_MODULE_H #define EIGEN_ORDERINGMETHODS_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" /** * \defgroup OrderingMethods_Module OrderingMethods module * * This module is currently for internal use only * * It defines various built-in and external ordering methods for sparse matrices. * They are typically used to reduce the number of elements during * the sparse matrix decomposition (LLT, LU, QR). * Precisely, in a preprocessing step, a permutation matrix P is computed using * those ordering methods and applied to the columns of the matrix. * Using for instance the sparse Cholesky decomposition, it is expected that * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A). * * * Usage : * \code * #include * \endcode * * A simple usage is as a template parameter in the sparse decomposition classes : * * \code * SparseLU > solver; * \endcode * * \code * SparseQR > solver; * \endcode * * It is possible as well to call directly a particular ordering method for your own purpose, * \code * AMDOrdering ordering; * PermutationMatrix perm; * SparseMatrix A; * //Fill the matrix ... * * ordering(A, perm); // Call AMD * \endcode * * \note Some of these methods (like AMD or METIS), need the sparsity pattern * of the input matrix to be symmetric. 
When the matrix is structurally unsymmetric, * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method. * If your matrix is already symmetric (at leat in structure), you can avoid that * by calling the method with a SelfAdjointView type. * * \code * // Call the ordering on the pattern of the lower triangular matrix A * ordering(A.selfadjointView(), perm); * \endcode */ #include "src/OrderingMethods/Amd.h" #include "src/OrderingMethods/Ordering.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_ORDERINGMETHODS_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/PaStiXSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PASTIXSUPPORT_MODULE_H #define EIGEN_PASTIXSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" extern "C" { #include #include } #ifdef complex #undef complex #endif /** \ingroup Support_modules * \defgroup PaStiXSupport_Module PaStiXSupport module * * This module provides an interface to the PaSTiX library. * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. * It provides the two following main factorization classes: * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). * * \code * #include * \endcode * * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies. 
* This wrapper resuires PaStiX version 5.x compiled without MPI support. * The dependencies depend on how PaSTiX has been compiled. * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task. * */ #include "src/PaStiXSupport/PaStiXSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_PASTIXSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/PardisoSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARDISOSUPPORT_MODULE_H #define EIGEN_PARDISOSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" #include /** \ingroup Support_modules * \defgroup PardisoSupport_Module PardisoSupport module * * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers. * * \code * #include * \endcode * * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies. * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration. * */ #include "src/PardisoSupport/PardisoSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_PARDISOSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/QR ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_QR_MODULE_H #define EIGEN_QR_MODULE_H #include "Core" #include "Cholesky" #include "Jacobi" #include "Householder" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup QR_Module QR module * * * * This module provides various QR decompositions * This module also provides some MatrixBase methods, including: * - MatrixBase::householderQr() * - MatrixBase::colPivHouseholderQr() * - MatrixBase::fullPivHouseholderQr() * * \code * #include * \endcode */ #include "src/QR/HouseholderQR.h" #include "src/QR/FullPivHouseholderQR.h" #include "src/QR/ColPivHouseholderQR.h" #include "src/QR/CompleteOrthogonalDecomposition.h" #ifdef EIGEN_USE_LAPACKE #ifdef EIGEN_USE_MKL #include "mkl_lapacke.h" #else #include "src/misc/lapacke.h" #endif #include "src/QR/HouseholderQR_LAPACKE.h" #include "src/QR/ColPivHouseholderQR_LAPACKE.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_QR_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/QtAlignedMalloc ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_QTMALLOC_MODULE_H
#define EIGEN_QTMALLOC_MODULE_H

#include "Core"

#if (!EIGEN_MALLOC_ALREADY_ALIGNED)

#include "src/Core/util/DisableStupidWarnings.h"

// Replacement for Qt's global qMalloc: delegates to Eigen's aligned allocator
// so the returned memory satisfies Eigen's alignment requirements. These
// overrides are only compiled when the system malloc is not already
// sufficiently aligned (guarded by EIGEN_MALLOC_ALREADY_ALIGNED above).
void *qMalloc(std::size_t size)
{
  return Eigen::internal::aligned_malloc(size);
}

// Counterpart to qMalloc above: releases memory through Eigen's aligned free.
void qFree(void *ptr)
{
  Eigen::internal::aligned_free(ptr);
}

// Aligned reallocation: allocate a new aligned block, copy the payload over,
// then free the old block.
// NOTE(review): this copies `size` (the NEW size) bytes out of the old block,
// so growing an allocation reads past the end of the previous block, and a
// null `ptr` goes straight into memcpy (undefined behavior). This matches
// upstream Eigen as vendored here; callers are presumably expected to pass a
// valid pointer and a size no larger than the original allocation — TODO
// confirm against Qt's qRealloc contract before relying on grow-in-place.
void *qRealloc(void *ptr, std::size_t size)
{
  void* newPtr = Eigen::internal::aligned_malloc(size);
  std::memcpy(newPtr, ptr, size);
  Eigen::internal::aligned_free(ptr);
  return newPtr;
}

#include "src/Core/util/ReenableStupidWarnings.h"

#endif

#endif // EIGEN_QTMALLOC_MODULE_H


================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SPQRSupport
================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPQRSUPPORT_MODULE_H
#define EIGEN_SPQRSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

#include "SuiteSparseQR.hpp"

/** \ingroup Support_modules
  * \defgroup SPQRSupport_Module SuiteSparseQR module
  *
  * This module provides an interface to the SPQR library, which is part of the suitesparse package.
  *
  * \code
  * #include <Eigen/SPQRSupport>
  * \endcode
  *
  * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).
* For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules * */ #include "src/CholmodSupport/CholmodSupport.h" #include "src/SPQRSupport/SuiteSparseQRSupport.h" #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SVD ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SVD_MODULE_H #define EIGEN_SVD_MODULE_H #include "QR" #include "Householder" #include "Jacobi" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup SVD_Module SVD module * * * * This module provides SVD decomposition for matrices (both real and complex). * Two decomposition algorithms are provided: * - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones. * - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems. 
* These decompositions are accessible via the respective classes and following MatrixBase methods: * - MatrixBase::jacobiSvd() * - MatrixBase::bdcSvd() * * \code * #include * \endcode */ #include "src/misc/RealSvd2x2.h" #include "src/SVD/UpperBidiagonalization.h" #include "src/SVD/SVDBase.h" #include "src/SVD/JacobiSVD.h" #include "src/SVD/BDCSVD.h" #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) #ifdef EIGEN_USE_MKL #include "mkl_lapacke.h" #else #include "src/misc/lapacke.h" #endif #include "src/SVD/JacobiSVD_LAPACKE.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SVD_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/Sparse ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_MODULE_H #define EIGEN_SPARSE_MODULE_H /** \defgroup Sparse_Module Sparse meta-module * * Meta-module including all related modules: * - \ref SparseCore_Module * - \ref OrderingMethods_Module * - \ref SparseCholesky_Module * - \ref SparseLU_Module * - \ref SparseQR_Module * - \ref IterativeLinearSolvers_Module * \code #include \endcode */ #include "SparseCore" #include "OrderingMethods" #include "SparseCholesky" #include "SparseLU" #include "SparseQR" #include "IterativeLinearSolvers" #endif // EIGEN_SPARSE_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SparseCholesky ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2008-2013 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSECHOLESKY_MODULE_H #define EIGEN_SPARSECHOLESKY_MODULE_H #include "SparseCore" #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" /** * \defgroup SparseCholesky_Module SparseCholesky module * * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices. * Those decompositions are accessible via the following classes: * - SimplicialLLt, * - SimplicialLDLt * * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module. * * \code * #include * \endcode */ #include "src/SparseCholesky/SimplicialCholesky.h" #include "src/SparseCholesky/SimplicialCholesky_impl.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSECHOLESKY_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SparseCore ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSECORE_MODULE_H #define EIGEN_SPARSECORE_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" #include #include #include #include #include /** * \defgroup SparseCore_Module SparseCore module * * This module provides a sparse matrix representation, and basic associated matrix manipulations * and operations. * * See the \ref TutorialSparse "Sparse tutorial" * * \code * #include * \endcode * * This module depends on: Core. 
*/ #include "src/SparseCore/SparseUtil.h" #include "src/SparseCore/SparseMatrixBase.h" #include "src/SparseCore/SparseAssign.h" #include "src/SparseCore/CompressedStorage.h" #include "src/SparseCore/AmbiVector.h" #include "src/SparseCore/SparseCompressedBase.h" #include "src/SparseCore/SparseMatrix.h" #include "src/SparseCore/SparseMap.h" #include "src/SparseCore/MappedSparseMatrix.h" #include "src/SparseCore/SparseVector.h" #include "src/SparseCore/SparseRef.h" #include "src/SparseCore/SparseCwiseUnaryOp.h" #include "src/SparseCore/SparseCwiseBinaryOp.h" #include "src/SparseCore/SparseTranspose.h" #include "src/SparseCore/SparseBlock.h" #include "src/SparseCore/SparseDot.h" #include "src/SparseCore/SparseRedux.h" #include "src/SparseCore/SparseView.h" #include "src/SparseCore/SparseDiagonalProduct.h" #include "src/SparseCore/ConservativeSparseSparseProduct.h" #include "src/SparseCore/SparseSparseProductWithPruning.h" #include "src/SparseCore/SparseProduct.h" #include "src/SparseCore/SparseDenseProduct.h" #include "src/SparseCore/SparseSelfAdjointView.h" #include "src/SparseCore/SparseTriangularView.h" #include "src/SparseCore/TriangularSolver.h" #include "src/SparseCore/SparsePermutation.h" #include "src/SparseCore/SparseFuzzy.h" #include "src/SparseCore/SparseSolverBase.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSECORE_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SparseLU ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSELU_MODULE_H #define EIGEN_SPARSELU_MODULE_H #include "SparseCore" /** * \defgroup SparseLU_Module SparseLU module * This module defines a supernodal factorization of general sparse matrices. * The code is fully optimized for supernode-panel updates with specialized kernels. * Please, see the documentation of the SparseLU class for more details. */ // Ordering interface #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" #include "src/SparseLU/SparseLU_gemm_kernel.h" #include "src/SparseLU/SparseLU_Structs.h" #include "src/SparseLU/SparseLU_SupernodalMatrix.h" #include "src/SparseLU/SparseLUImpl.h" #include "src/SparseCore/SparseColEtree.h" #include "src/SparseLU/SparseLU_Memory.h" #include "src/SparseLU/SparseLU_heap_relax_snode.h" #include "src/SparseLU/SparseLU_relax_snode.h" #include "src/SparseLU/SparseLU_pivotL.h" #include "src/SparseLU/SparseLU_panel_dfs.h" #include "src/SparseLU/SparseLU_kernel_bmod.h" #include "src/SparseLU/SparseLU_panel_bmod.h" #include "src/SparseLU/SparseLU_column_dfs.h" #include "src/SparseLU/SparseLU_column_bmod.h" #include "src/SparseLU/SparseLU_copy_to_ucol.h" #include "src/SparseLU/SparseLU_pruneL.h" #include "src/SparseLU/SparseLU_Utils.h" #include "src/SparseLU/SparseLU.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSELU_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SparseQR ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSEQR_MODULE_H #define EIGEN_SPARSEQR_MODULE_H #include "SparseCore" #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup SparseQR_Module SparseQR module * \brief Provides QR decomposition for sparse matrices * * This module provides a simplicial version of the left-looking Sparse QR decomposition. * The columns of the input matrix should be reordered to limit the fill-in during the * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end. * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list * of built-in and external ordering methods. * * \code * #include * \endcode * * */ #include "src/SparseCore/SparseColEtree.h" #include "src/SparseQR/SparseQR.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/StdDeque ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDDEQUE_MODULE_H #define EIGEN_STDDEQUE_MODULE_H #include "Core" #include #if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) #else #include "src/StlSupport/StdDeque.h" #endif #endif // EIGEN_STDDEQUE_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/StdList ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDLIST_MODULE_H #define EIGEN_STDLIST_MODULE_H #include "Core" #include #if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) #else #include "src/StlSupport/StdList.h" #endif #endif // EIGEN_STDLIST_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/StdVector ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDVECTOR_MODULE_H #define EIGEN_STDVECTOR_MODULE_H #include "Core" #include #if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) #else #include "src/StlSupport/StdVector.h" #endif #endif // EIGEN_STDVECTOR_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/SuperLUSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H #define EIGEN_SUPERLUSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" #ifdef EMPTY #define EIGEN_EMPTY_WAS_ALREADY_DEFINED #endif typedef int int_t; #include #include #include // slu_util.h defines a preprocessor token named EMPTY which is really polluting, // so we remove it in favor of a SUPERLU_EMPTY token. // If EMPTY was already defined then we don't undef it. #if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) # undef EIGEN_EMPTY_WAS_ALREADY_DEFINED #elif defined(EMPTY) # undef EMPTY #endif #define SUPERLU_EMPTY (-1) namespace Eigen { struct SluMatrix; } /** \ingroup Support_modules * \defgroup SuperLUSupport_Module SuperLUSupport module * * This module provides an interface to the SuperLU library. * It provides the following factorization class: * - class SuperLU: a supernodal sequential LU factorization. * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods). * * \warning This wrapper requires at least versions 4.0 of SuperLU. The 3.x versions are not supported. * * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting. * * \code * #include * \endcode * * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies. * The dependencies depend on how superlu has been compiled. * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task. 
* */ #include "src/SuperLUSupport/SuperLUSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SUPERLUSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/UmfPackSupport ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_UMFPACKSUPPORT_MODULE_H #define EIGEN_UMFPACKSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" extern "C" { #include } /** \ingroup Support_modules * \defgroup UmfPackSupport_Module UmfPackSupport module * * This module provides an interface to the UmfPack library which is part of the suitesparse package. * It provides the following factorization class: * - class UmfPackLU: a multifrontal sequential LU factorization. * * \code * #include * \endcode * * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be linked to the umfpack library and its dependencies. * The dependencies depend on how umfpack has been compiled. * For a cmake based project, you can use our FindUmfPack.cmake module to help you in this task. * */ #include "src/UmfPackSupport/UmfPackSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_UMFPACKSUPPORT_MODULE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Cholesky/InternalHeaderCheck.h ================================================ #ifndef EIGEN_CHOLESKY_MODULE_H #error "Please include Eigen/Cholesky instead of including headers inside the src directory directly." 
#endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Cholesky/LDLT.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // Copyright (C) 2009 Keir Mierle // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2011 Timothy E. Holy // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef MatrixXpr XprKind; typedef SolverStorage StorageKind; typedef int StorageIndex; enum { Flags = 0 }; }; template struct LDLT_Traits; // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; } /** \ingroup Cholesky_Module * * \class LDLT * * \brief Robust Cholesky decomposition of a matrix with pivoting * * \tparam MatrixType_ the type of the matrix of which to compute the LDL^T Cholesky decomposition * \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper. * The other triangular part won't be read. * * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L * is lower triangular with a unit diagonal and D is a diagonal matrix. * * The decomposition uses pivoting to ensure stability, so that D will have * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root * on D also stabilizes the computation. * * Remember that Cholesky decompositions are not rank-revealing. 
Also, do not use a Cholesky * decomposition to determine whether a system of equations has a solution. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT */ template class LDLT : public SolverBase > { public: typedef MatrixType_ MatrixType; typedef SolverBase Base; friend class SolverBase; EIGEN_GENERIC_PUBLIC_INTERFACE(LDLT) enum { MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, UpLo = UpLo_ }; typedef Matrix TmpMatrixType; typedef Transpositions TranspositionType; typedef PermutationMatrix PermutationType; typedef internal::LDLT_Traits Traits; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LDLT() */ explicit LDLT(Index size) : m_matrix(size, size), m_transpositions(size), m_temporary(size), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Constructor with decomposition * * This calculates the decomposition for the input \a matrix. * * \sa LDLT(Index size) */ template explicit LDLT(const EigenBase& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LDLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. 
* * \sa LDLT(const EigenBase&) */ template explicit LDLT(EigenBase& matrix) : m_matrix(matrix.derived()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** Clear any existing decomposition * \sa rankUpdate(w,sigma) */ void setZero() { m_isInitialized = false; } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the permutation matrix P as a transposition sequence. */ inline const TranspositionType& transpositionsP() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_transpositions; } /** \returns the coefficients of the diagonal matrix D */ inline Diagonal vectorD() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; } /** \returns true if the matrix is negative (semidefinite) */ inline bool isNegative(void) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. * * This function also supports in-place solves using the syntax x = decompositionObject.solve(x) . 
* * \note_about_checking_solutions * * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular. * * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() */ template inline const Solve solve(const MatrixBase& b) const; #endif template bool solveInPlace(MatrixBase &bAndX) const; template LDLT& compute(const EigenBase& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the LDLT decomposition. */ RealScalar rcond() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return internal::rcond_estimate_helper(m_l1_norm, *this); } template LDLT& rankUpdate(const MatrixBase& w, const RealScalar& alpha=1); /** \returns the internal LDLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLDLT() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. 
* * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LDLT& adjoint() const { return *this; }; EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } EIGEN_DEVICE_FUNC inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was successful, * \c NumericalIssue if the factorization failed because of a zero pivot. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_info; } #ifndef EIGEN_PARSED_BY_DOXYGEN template void _solve_impl(const RhsType &rhs, DstType &dst) const; template void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; #endif protected: EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) /** \internal * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. * The strict upper part is used during the decomposition, the strict lower * part correspond to the coefficients of L (its diagonal is equal to 1 and * is not stored), and the diagonal entries correspond to D. 
*/ MatrixType m_matrix; RealScalar m_l1_norm; TranspositionType m_transpositions; TmpMatrixType m_temporary; internal::SignMatrix m_sign; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template struct ldlt_inplace; template<> struct ldlt_inplace { template static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename TranspositionType::StorageIndex IndexType; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); bool found_zero_pivot = false; bool ret = true; if (size <= 1) { transpositions.setIdentity(); if(size==0) sign = ZeroSign; else if (numext::real(mat.coeff(0,0)) > static_cast(0) ) sign = PositiveSemiDef; else if (numext::real(mat.coeff(0,0)) < static_cast(0)) sign = NegativeSemiDef; else sign = ZeroSign; return true; } for (Index k = 0; k < size; ++k) { // Find largest diagonal element Index index_of_biggest_in_corner; mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += k; transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner); if(k != index_of_biggest_in_corner) { // apply the transposition while taking care to consider only // the lower triangular part Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); for(Index i=k+1;i::IsComplex) mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k)); } // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index rs = size - k - 1; Block A21(mat,k+1,k,rs,1); Block A10(mat,k,0,1,k); Block 
A20(mat,k+1,0,rs,k); if(k>0) { temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); if(rs>0) A21.noalias() -= A20 * temp.head(k); } // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot // was smaller than the cutoff value. However, since LDLT is not rank-revealing // we should only make sure that we do not introduce INF or NaN values. // Remark that LAPACK also uses 0 as the cutoff value. RealScalar realAkk = numext::real(mat.coeffRef(k,k)); bool pivot_is_valid = (abs(realAkk) > RealScalar(0)); if(k==0 && !pivot_is_valid) { // The entire diagonal is zero, there is nothing more to do // except filling the transpositions, and checking whether the matrix is zero. sign = ZeroSign; for(Index j = 0; j0) && pivot_is_valid) A21 /= realAkk; else if(rs>0) ret = ret && (A21.array()==Scalar(0)).all(); if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed else if(!pivot_is_valid) found_zero_pivot = true; if (sign == PositiveSemiDef) { if (realAkk < static_cast(0)) sign = Indefinite; } else if (sign == NegativeSemiDef) { if (realAkk > static_cast(0)) sign = Indefinite; } else if (sign == ZeroSign) { if (realAkk > static_cast(0)) sign = PositiveSemiDef; else if (realAkk < static_cast(0)) sign = NegativeSemiDef; } } return ret; } // Reference for the algorithm: Davis and Hager, "Multiple Rank // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) // Trivial rearrangements of their computations (Timothy E. Holy) // allow their algorithm to work for rank-1 updates even if the // original matrix is not of full rank. 
// Here only rank-1 updates are implemented, to reduce the // requirement for intermediate storage and improve accuracy template static bool updateInPlace(MatrixType& mat, MatrixBase& w, const typename MatrixType::RealScalar& sigma=1) { using numext::isfinite; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; const Index size = mat.rows(); eigen_assert(mat.cols() == size && w.size()==size); RealScalar alpha = 1; // Apply the update for (Index j = 0; j < size; j++) { // Check for termination due to an original decomposition of low-rank if (!(isfinite)(alpha)) break; // Update the diagonal terms RealScalar dj = numext::real(mat.coeff(j,j)); Scalar wj = w.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*alpha + swj2; mat.coeffRef(j,j) += swj2/alpha; alpha += swj2/dj; // Update the terms of L Index rs = size-j-1; w.tail(rs) -= wj * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs); } return true; } template static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1) { // Apply the permutation to the input w tmp = transpositions * w; return ldlt_inplace::updateInPlace(mat,tmp,sigma); } }; template<> struct ldlt_inplace { template static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { Transpose matt(mat); return ldlt_inplace::unblocked(matt, transpositions, temp, sign); } template static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1) { Transpose matt(mat); return ldlt_inplace::update(matt, transpositions, tmp, w.conjugate(), sigma); } }; template struct LDLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL 
getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } }; template struct LDLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } }; } // end namespace internal /** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template template LDLT& LDLT::compute(const EigenBase& a) { eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (UpLo_ == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); m_sign = internal::ZeroSign; m_info = internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue; m_isInitialized = true; return *this; } /** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. * \param w a vector to be incorporated into the decomposition. * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. 
* \sa setZero() */ template template LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename LDLT::RealScalar& sigma) { typedef typename TranspositionType::StorageIndex IndexType; const Index size = w.rows(); if (m_isInitialized) { eigen_assert(m_matrix.rows()==size); } else { m_matrix.resize(size,size); m_matrix.setZero(); m_transpositions.resize(size); for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = IndexType(i); m_temporary.resize(size); m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef; m_isInitialized = true; } internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template template void LDLT::_solve_impl(const RhsType &rhs, DstType &dst) const { _solve_impl_transposed(rhs, dst); } template template void LDLT::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const { // dst = P b dst = m_transpositions * rhs; // dst = L^-1 (P b) // dst = L^-*T (P b) matrixL().template conjugateIf().solveInPlace(dst); // dst = D^-* (L^-1 P b) // dst = D^-1 (L^-*T P b) // more precisely, use pseudo-inverse of D (see bug 241) using std::abs; const typename Diagonal::RealReturnType vecD(vectorD()); // In some previous versions, tolerance was set to the max of 1/highest (or rather numeric_limits::min()) // and the maximal diagonal entry * epsilon as motivated by LAPACK's xGELSS: // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits::epsilon(),RealScalar(1) / NumTraits::highest()); // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest // diagonal element is not well justified and leads to numerical issues in some cases. // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. // Using numeric_limits::min() gives us more robustness to denormals. 
RealScalar tolerance = (std::numeric_limits::min)(); for (Index i = 0; i < vecD.size(); ++i) { if(abs(vecD(i)) > tolerance) dst.row(i) /= vecD(i); else dst.row(i).setZero(); } // dst = L^-* (D^-* L^-1 P b) // dst = L^-T (D^-1 L^-*T P b) matrixL().transpose().template conjugateIf().solveInPlace(dst); // dst = P^T (L^-* D^-* L^-1 P b) = A^-1 b // dst = P^-T (L^-T D^-1 L^-*T P b) = A^-1 b dst = m_transpositions.transpose() * dst; } #endif /** \internal use x = ldlt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * * This version avoids a copy when the right hand side matrix b is not * needed anymore. * * \sa LDLT::solve(), MatrixBase::ldlt() */ template template bool LDLT::solveInPlace(MatrixBase &bAndX) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows() == bAndX.rows()); bAndX = this->solve(bAndX); return true; } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: P^T L D L^* P. * This function is provided for debug purpose. 
*/ template MatrixType LDLT::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); MatrixType res(size,size); // P res.setIdentity(); res = transpositionsP() * res; // L^* P res = matrixU() * res; // D(L^*P) res = vectorD().real().asDiagonal() * res; // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) res = transpositionsP().transpose() * res; return res; } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa MatrixBase::ldlt() */ template inline const LDLT::PlainObject, UpLo> SelfAdjointView::ldlt() const { return LDLT(m_matrix); } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa SelfAdjointView::ldlt() */ template inline const LDLT::PlainObject> MatrixBase::ldlt() const { return LDLT(derived()); } } // end namespace Eigen #endif // EIGEN_LDLT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Cholesky/LLT.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_LLT_H #define EIGEN_LLT_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal{ template struct traits > : traits { typedef MatrixXpr XprKind; typedef SolverStorage StorageKind; typedef int StorageIndex; enum { Flags = 0 }; }; template struct LLT_Traits; } /** \ingroup Cholesky_Module * * \class LLT * * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features * * \tparam MatrixType_ the type of the matrix of which we are computing the LL^T Cholesky decomposition * \tparam UpLo_ the triangular part that will be used for the decomposition: Lower (default) or Upper. * The other triangular part won't be read. * * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite * matrix A such that A = LL^* = U^*U, where L is lower triangular. * * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, * for that purpose, we recommend the Cholesky decomposition without square root which is more stable * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other * situations like generalised eigen problems with hermitian matrices. * * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations * has a solution. * * Example: \include LLT_example.cpp * Output: \verbinclude LLT_example.out * * \b Performance: for best performance, it is recommended to use a column-major storage format * with the Lower triangular part (the default), or, equivalently, a row-major storage format * with the Upper triangular part. Otherwise, you might get a 20% slowdown for the full factorization * step, and rank-updates can be up to 3 times slower. 
* * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * Note that during the decomposition, only the lower (or upper, as defined by UpLo_) triangular part of A is considered. * Therefore, the strict lower part does not have to store correct values. * * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT */ template class LLT : public SolverBase > { public: typedef MatrixType_ MatrixType; typedef SolverBase Base; friend class SolverBase; EIGEN_GENERIC_PUBLIC_INTERFACE(LLT) enum { MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; enum { PacketSize = internal::packet_traits::size, AlignmentMask = int(PacketSize)-1, UpLo = UpLo_ }; typedef internal::LLT_Traits Traits; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LLT::compute(const MatrixType&). */ LLT() : m_matrix(), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LLT() */ explicit LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} template explicit LLT(const EigenBase& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when * \c MatrixType is a Eigen::Ref. 
* * \sa LLT(const EigenBase&) */ template explicit LLT(EigenBase& matrix) : m_matrix(matrix.derived()), m_isInitialized(false) { compute(matrix.derived()); } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getL(m_matrix); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * Since this LLT class assumes anyway that the matrix A is invertible, the solution * theoretically exists and is unique regardless of b. * * Example: \include LLT_solve.cpp * Output: \verbinclude LLT_solve.out * * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() */ template inline const Solve solve(const MatrixBase& b) const; #endif template void solveInPlace(const MatrixBase &bAndX) const; template LLT& compute(const EigenBase& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the Cholesky decomposition. */ RealScalar rcond() const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); return internal::rcond_estimate_helper(m_l1_norm, *this); } /** \returns the LLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLLT() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was successful, * \c NumericalIssue if the matrix.appears not to be positive definite. 
*/ ComputationInfo info() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_info; } /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. * * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LLT& adjoint() const EIGEN_NOEXCEPT { return *this; }; inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } template LLT & rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); #ifndef EIGEN_PARSED_BY_DOXYGEN template void _solve_impl(const RhsType &rhs, DstType &dst) const; template void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; #endif protected: EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. */ MatrixType m_matrix; RealScalar m_l1_norm; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template struct llt_inplace; template static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { using std::sqrt; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::ColXpr ColXpr; typedef typename internal::remove_all::type ColXprCleaned; typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; typedef Matrix TempVectorType; typedef typename TempVectorType::SegmentReturnType TempVecSegment; Index n = mat.cols(); eigen_assert(mat.rows()==n && vec.size()==n); TempVectorType temp; if(sigma>0) { // This version is based on Givens rotations. 
// It is faster than the other one below, but only works for updates, // i.e., for sigma > 0 temp = sqrt(sigma) * vec; for(Index i=0; i g; g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); Index rs = n-i-1; if(rs>0) { ColXprSegment x(mat.col(i).tail(rs)); TempVecSegment y(temp.tail(rs)); apply_rotation_in_the_plane(x, y, g); } } } else { temp = vec; RealScalar beta = 1; for(Index j=0; j struct llt_inplace { typedef typename NumTraits::Real RealScalar; template static Index unblocked(MatrixType& mat) { using std::sqrt; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) { Index rs = size-k-1; // remaining size Block A21(mat,k+1,k,rs,1); Block A10(mat,k,0,1,k); Block A20(mat,k+1,0,rs,k); RealScalar x = numext::real(mat.coeff(k,k)); if (k>0) x -= A10.squaredNorm(); if (x<=RealScalar(0)) return k; mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); if (rs>0) A21 /= x; } return -1; } template static Index blocked(MatrixType& m) { eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) return unblocked(m); Index blockSize = size/8; blockSize = (blockSize/16)*16; blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); for (Index k=0; k A11(m,k, k, bs,bs); Block A21(m,k+bs,k, rs,bs); Block A22(m,k+bs,k+bs,rs,rs); Index ret; if((ret=unblocked(A11))>=0) return k+ret; if(rs>0) A11.adjoint().template triangularView().template solveInPlace(A21); if(rs>0) A22.template selfadjointView().rankUpdate(A21,typename NumTraits::Literal(-1)); // bottleneck } return -1; } template static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } }; template struct llt_inplace { typedef typename NumTraits::Real RealScalar; template static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::unblocked(matt); } template static EIGEN_STRONG_INLINE Index 
blocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::blocked(matt); } template static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { Transpose matt(mat); return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); } }; template struct LLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace::blocked(m)==-1; } }; template struct LLT_Traits { typedef const TriangularView MatrixL; typedef const TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace::blocked(m)==-1; } }; } // end namespace internal /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix * * \returns a reference to *this * * Example: \include TutorialLinAlgComputeTwice.cpp * Output: \verbinclude TutorialLinAlgComputeTwice.out */ template template LLT& LLT::compute(const EigenBase& a) { eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); if (!internal::is_same_dense(m_matrix, a.derived())) m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. 
m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (UpLo_ == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_isInitialized = true; bool ok = Traits::inplace_decomposition(m_matrix); m_info = ok ? Success : NumericalIssue; return *this; } /** Performs a rank one update (or dowdate) of the current decomposition. * If A = LL^* before the rank one update, * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector * of same dimension. */ template template LLT & LLT::rankUpdate(const VectorType& v, const RealScalar& sigma) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); eigen_assert(v.size()==m_matrix.cols()); eigen_assert(m_isInitialized); if(internal::llt_inplace::rankUpdate(m_matrix,v,sigma)>=0) m_info = NumericalIssue; else m_info = Success; return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template template void LLT::_solve_impl(const RhsType &rhs, DstType &dst) const { _solve_impl_transposed(rhs, dst); } template template void LLT::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const { dst = rhs; matrixL().template conjugateIf().solveInPlace(dst); matrixU().template conjugateIf().solveInPlace(dst); } #endif /** \internal use x = llt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * This version avoids a copy when the right hand side matrix b is not needed anymore. * * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. * This function will const_cast it, so constness isn't honored here. 
* * \sa LLT::solve(), MatrixBase::llt() */ template template void LLT::solveInPlace(const MatrixBase &bAndX) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==bAndX.rows()); matrixL().solveInPlace(bAndX); matrixU().solveInPlace(bAndX); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: L L^*. * This function is provided for debug purpose. */ template MatrixType LLT::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return matrixL() * matrixL().adjoint().toDenseMatrix(); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template inline const LLT::PlainObject> MatrixBase::llt() const { return LLT(derived()); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template inline const LLT::PlainObject, UpLo> SelfAdjointView::llt() const { return LLT(m_matrix); } } // end namespace Eigen #endif // EIGEN_LLT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h ================================================ /* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * LLt decomposition based on LAPACKE_?potrf function. ******************************************************************************** */ #ifndef EIGEN_LLT_LAPACKE_H #define EIGEN_LLT_LAPACKE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct lapacke_llt; #define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \ template<> struct lapacke_llt \ { \ template \ static inline Index potrf(MatrixType& m, char uplo) \ { \ lapack_int matrix_order; \ lapack_int size, lda, info, StorageOrder; \ EIGTYPE* a; \ eigen_assert(m.rows()==m.cols()); \ /* Set up parameters for ?potrf */ \ size = convert_index(m.rows()); \ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ a = &(m.coeffRef(0,0)); \ lda = convert_index(m.outerStride()); \ \ info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \ info = (info==0) ? -1 : info>0 ? 
info-1 : size; \ return info; \ } \ }; \ template<> struct llt_inplace \ { \ template \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt::potrf(m, 'L'); \ } \ template \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ }; \ template<> struct llt_inplace \ { \ template \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt::potrf(m, 'U'); \ } \ template \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { \ Transpose matt(mat); \ return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ } \ }; EIGEN_LAPACKE_LLT(double, double, d) EIGEN_LAPACKE_LLT(float, float, s) EIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z) EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_LLT_LAPACKE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/CholmodSupport/CholmodSupport.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_CHOLMODSUPPORT_H #define EIGEN_CHOLMODSUPPORT_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct cholmod_configure_matrix; template<> struct cholmod_configure_matrix { template static void run(CholmodType& mat) { mat.xtype = CHOLMOD_REAL; mat.dtype = CHOLMOD_DOUBLE; } }; template<> struct cholmod_configure_matrix > { template static void run(CholmodType& mat) { mat.xtype = CHOLMOD_COMPLEX; mat.dtype = CHOLMOD_DOUBLE; } }; // Other scalar types are not yet supported by Cholmod // template<> struct cholmod_configure_matrix { // template // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_REAL; // mat.dtype = CHOLMOD_SINGLE; // } // }; // // template<> struct cholmod_configure_matrix > { // template // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_COMPLEX; // mat.dtype = CHOLMOD_SINGLE; // } // }; } // namespace internal /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. * Note that the data are shared. 
*/ template cholmod_sparse viewAsCholmod(Ref > mat) { cholmod_sparse res; res.nzmax = mat.nonZeros(); res.nrow = mat.rows(); res.ncol = mat.cols(); res.p = mat.outerIndexPtr(); res.i = mat.innerIndexPtr(); res.x = mat.valuePtr(); res.z = 0; res.sorted = 1; if(mat.isCompressed()) { res.packed = 1; res.nz = 0; } else { res.packed = 0; res.nz = mat.innerNonZeroPtr(); } res.dtype = 0; res.stype = -1; if (internal::is_same::value) { res.itype = CHOLMOD_INT; } else if (internal::is_same::value) { res.itype = CHOLMOD_LONG; } else { eigen_assert(false && "Index type not supported yet"); } // setup res.xtype internal::cholmod_configure_matrix::run(res); res.stype = 0; return res; } template const cholmod_sparse viewAsCholmod(const SparseMatrix& mat) { cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); return res; } template const cholmod_sparse viewAsCholmod(const SparseVector& mat) { cholmod_sparse res = viewAsCholmod(Ref >(mat.const_cast_derived())); return res; } /** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. * The data are not copied but shared. */ template cholmod_sparse viewAsCholmod(const SparseSelfAdjointView, UpLo>& mat) { cholmod_sparse res = viewAsCholmod(Ref >(mat.matrix().const_cast_derived())); if(UpLo==Upper) res.stype = 1; if(UpLo==Lower) res.stype = -1; // swap stype for rowmajor matrices (only works for real matrices) EIGEN_STATIC_ASSERT((Options_ & RowMajorBit) == 0 || NumTraits::IsComplex == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); if(Options_ & RowMajorBit) res.stype *=-1; return res; } /** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. * The data are not copied but shared. 
*/ template cholmod_dense viewAsCholmod(MatrixBase& mat) { EIGEN_STATIC_ASSERT((internal::traits::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); typedef typename Derived::Scalar Scalar; cholmod_dense res; res.nrow = mat.rows(); res.ncol = mat.cols(); res.nzmax = res.nrow * res.ncol; res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); res.x = (void*)(mat.derived().data()); res.z = 0; internal::cholmod_configure_matrix::run(res); return res; } /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. * The data are not copied but shared. */ template MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) { return MappedSparseMatrix (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); } namespace internal { // template specializations for int and long that call the correct cholmod method #define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \ template inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \ template<> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_l_ ## name (&Common); } #define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \ template inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \ template<> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); } EIGEN_CHOLMOD_SPECIALIZE0(int, start) EIGEN_CHOLMOD_SPECIALIZE0(int, finish) EIGEN_CHOLMOD_SPECIALIZE1(int, free_factor, cholmod_factor*, L) EIGEN_CHOLMOD_SPECIALIZE1(int, free_dense, cholmod_dense*, X) EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A) EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A) template inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); } template<> inline cholmod_dense* cm_solve (int 
sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); } template inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); } template<> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); } template inline int cm_factorize_p (cholmod_sparse* A, double beta[2], StorageIndex_* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); } template<> inline int cm_factorize_p (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); } #undef EIGEN_CHOLMOD_SPECIALIZE0 #undef EIGEN_CHOLMOD_SPECIALIZE1 } // namespace internal enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt }; /** \ingroup CholmodSupport_Module * \class CholmodBase * \brief The base class for the direct Cholesky factorization of Cholmod * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT */ template class CholmodBase : public SparseSolverBase { protected: typedef SparseSolverBase Base; using Base::derived; using Base::m_isInitialized; public: typedef MatrixType_ MatrixType; enum { UpLo = UpLo_ }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef MatrixType CholMatrixType; typedef typename MatrixType::StorageIndex StorageIndex; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: CholmodBase() : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same::value), 
CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; internal::cm_start(m_cholmod); } explicit CholmodBase(const MatrixType& matrix) : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; internal::cm_start(m_cholmod); compute(matrix); } ~CholmodBase() { if(m_cholmodFactor) internal::cm_free_factor(m_cholmodFactor, m_cholmod); internal::cm_finish(m_cholmod); } inline StorageIndex cols() const { return internal::convert_index(m_cholmodFactor->n); } inline StorageIndex rows() const { return internal::convert_index(m_cholmodFactor->n); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was successful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** Computes the sparse Cholesky decomposition of \a matrix */ Derived& compute(const MatrixType& matrix) { analyzePattern(matrix); factorize(matrix); return derived(); } /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { if(m_cholmodFactor) { internal::cm_free_factor(m_cholmodFactor, m_cholmod); m_cholmodFactor = 0; } cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); m_cholmodFactor = internal::cm_analyze(A, m_cholmod); this->m_isInitialized = true; this->m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } /** Performs a numeric decomposition of \a matrix * * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); internal::cm_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, m_cholmod); // If the factorization failed, minor is the column at which it did. On success minor == n. this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); m_factorizationIsOk = true; } /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. * See the Cholmod user guide for details. */ cholmod_common& cholmod() { return m_cholmod; } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template void _solve_impl(const MatrixBase &b, MatrixBase &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref. Ref > b_ref(b.derived()); cholmod_dense b_cd = viewAsCholmod(b_ref); cholmod_dense* x_cd = internal::cm_solve(CHOLMOD_A, *m_cholmodFactor, b_cd, m_cholmod); if(!x_cd) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) 
// NOTE Actually, the copy can be avoided by calling cholmod_solve2 instead of cholmod_solve dest = Matrix::Map(reinterpret_cast(x_cd->x),b.rows(),b.cols()); internal::cm_free_dense(x_cd, m_cholmod); } /** \internal */ template void _solve_impl(const SparseMatrixBase &b, SparseMatrixBase &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // note: cs stands for Cholmod Sparse Ref > b_ref(b.const_cast_derived()); cholmod_sparse b_cs = viewAsCholmod(b_ref); cholmod_sparse* x_cs = internal::cm_spsolve(CHOLMOD_A, *m_cholmodFactor, b_cs, m_cholmod); if(!x_cs) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) // NOTE cholmod_spsolve in fact just calls the dense solver for blocks of 4 columns at a time (similar to Eigen's sparse solver) dest.derived() = viewAsEigen(*x_cs); internal::cm_free_sparse(x_cs, m_cholmod); } #endif // EIGEN_PARSED_BY_DOXYGEN /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. * * During the numerical factorization, an offset term is added to the diagonal coefficients:\n * \c d_ii = \a offset + \c d_ii * * The default is \a offset=0. * * \returns a reference to \c *this. 
*/ Derived& setShift(const RealScalar& offset) { m_shiftOffset[0] = double(offset); return derived(); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { using std::exp; return exp(logDeterminant()); } /** \returns the log determinant of the underlying matrix from the current factorization */ Scalar logDeterminant() const { using std::log; using numext::real; eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); RealScalar logDet = 0; Scalar *x = static_cast(m_cholmodFactor->x); if (m_cholmodFactor->is_super) { // Supernodal factorization stored as a packed list of dense column-major blocs, // as described by the following structure: // super[k] == index of the first column of the j-th super node StorageIndex *super = static_cast(m_cholmodFactor->super); // pi[k] == offset to the description of row indices StorageIndex *pi = static_cast(m_cholmodFactor->pi); // px[k] == offset to the respective dense block StorageIndex *px = static_cast(m_cholmodFactor->px); Index nb_super_nodes = m_cholmodFactor->nsuper; for (Index k=0; k < nb_super_nodes; ++k) { StorageIndex ncols = super[k + 1] - super[k]; StorageIndex nrows = pi[k + 1] - pi[k]; Map, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1)); logDet += sk.real().log().sum(); } } else { // Simplicial factorization stored as standard CSC matrix. 
StorageIndex *p = static_cast(m_cholmodFactor->p); Index size = m_cholmodFactor->n; for (Index k=0; kis_ll) logDet *= 2.0; return logDet; }; template void dumpMemory(Stream& /*s*/) {} protected: mutable cholmod_common m_cholmod; cholmod_factor* m_cholmodFactor; double m_shiftOffset[2]; mutable ComputationInfo m_info; int m_factorizationIsOk; int m_analysisIsOk; }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLLT * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT */ template class CholmodSimplicialLLT : public CholmodBase > { typedef CholmodBase Base; using Base::m_cholmod; public: typedef MatrixType_ MatrixType; CholmodSimplicialLLT() : Base() { init(); } CholmodSimplicialLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLLT() {} protected: void init() { m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; } }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLDLT * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT */ template class CholmodSimplicialLDLT : public CholmodBase > { typedef CholmodBase Base; using Base::m_cholmod; public: typedef MatrixType_ MatrixType; CholmodSimplicialLDLT() : Base() { init(); } CholmodSimplicialLDLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLDLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodSupernodalLLT * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization * using the Cholmod library. * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept */ template class CholmodSupernodalLLT : public CholmodBase > { typedef CholmodBase Base; using Base::m_cholmod; public: typedef MatrixType_ MatrixType; CholmodSupernodalLLT() : Base() { init(); } CholmodSupernodalLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSupernodalLLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodDecomposition * \brief A general Cholesky factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * This variant permits to change the underlying Cholesky method at runtime. * On the other hand, it does not provide access to the result of the factorization. * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. * * \tparam MatrixType_ the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo_ the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept */ template class CholmodDecomposition : public CholmodBase > { typedef CholmodBase Base; using Base::m_cholmod; public: typedef MatrixType_ MatrixType; CholmodDecomposition() : Base() { init(); } CholmodDecomposition(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodDecomposition() {} void setMode(CholmodMode mode) { switch(mode) { case CholmodAuto: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; break; case CholmodSimplicialLLt: m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; break; case CholmodSupernodalLLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; break; case CholmodLDLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; break; default: break; } } protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; } }; } // end namespace Eigen #endif // EIGEN_CHOLMODSUPPORT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/CholmodSupport/InternalHeaderCheck.h ================================================ #ifndef EIGEN_CHOLMODSUPPORT_MODULE_H #error "Please include Eigen/CholmodSupport instead of including headers inside the src directory directly." #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ArithmeticSequence.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2017 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ARITHMETIC_SEQUENCE_H #define EIGEN_ARITHMETIC_SEQUENCE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { #if (!EIGEN_HAS_CXX11) || !((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48) template struct aseq_negate {}; template<> struct aseq_negate { typedef Index type; }; template struct aseq_negate > { typedef FixedInt<-N> type; }; // Compilation error in the following case: template<> struct aseq_negate > {}; template::value, bool SizeIsSymbolic =symbolic::is_symbolic::value> struct aseq_reverse_first_type { typedef Index type; }; template struct aseq_reverse_first_type { typedef symbolic::AddExpr > >, symbolic::ValueExpr > > type; }; template struct aseq_reverse_first_type_aux { typedef Index type; }; template struct aseq_reverse_first_type_aux::type> { typedef FixedInt<(SizeType::value-1)*IncrType::value> type; }; template struct aseq_reverse_first_type { typedef typename aseq_reverse_first_type_aux::type Aux; typedef symbolic::AddExpr > type; }; template struct aseq_reverse_first_type { typedef symbolic::AddExpr > >, symbolic::ValueExpr >, symbolic::ValueExpr<> > type; }; #endif // Helper to cleanup the type of the increment: template struct cleanup_seq_incr { typedef typename cleanup_index_type::type type; }; } // namespace internal //-------------------------------------------------------------------------------- // seq(first,last,incr) and seqN(first,size,incr) //-------------------------------------------------------------------------------- template > class ArithmeticSequence; template ArithmeticSequence::type, typename internal::cleanup_index_type::type, typename internal::cleanup_seq_incr::type > seqN(FirstType first, SizeType size, IncrType incr); /** \class ArithmeticSequence * \ingroup Core_Module * * This class represents an arithmetic progression \f$ a_0, a_1, a_2, ..., a_{n-1}\f$ defined by * its \em first value \f$ a_0 \f$, its \em size (aka length) \em n, and the \em increment (aka stride) * that is equal to \f$ 
a_{i+1}-a_{i}\f$ for any \em i. * * It is internally used as the return type of the Eigen::seq and Eigen::seqN functions, and as the input arguments * of DenseBase::operator()(const RowIndices&, const ColIndices&), and most of the time this is the * only way it is used. * * \tparam FirstType type of the first element, usually an Index, * but internally it can be a symbolic expression * \tparam SizeType type representing the size of the sequence, usually an Index * or a compile time integral constant. Internally, it can also be a symbolic expression * \tparam IncrType type of the increment, can be a runtime Index, or a compile time integral constant (default is compile-time 1) * * \sa Eigen::seq, Eigen::seqN, DenseBase::operator()(const RowIndices&, const ColIndices&), class IndexedView */ template class ArithmeticSequence { public: ArithmeticSequence(FirstType first, SizeType size) : m_first(first), m_size(size) {} ArithmeticSequence(FirstType first, SizeType size, IncrType incr) : m_first(first), m_size(size), m_incr(incr) {} enum { SizeAtCompileTime = internal::get_fixed_value::value, IncrAtCompileTime = internal::get_fixed_value::value }; /** \returns the size, i.e., number of elements, of the sequence */ Index size() const { return m_size; } /** \returns the first element \f$ a_0 \f$ in the sequence */ Index first() const { return m_first; } /** \returns the value \f$ a_i \f$ at index \a i in the sequence. 
*/ Index operator[](Index i) const { return m_first + i * m_incr; } const FirstType& firstObject() const { return m_first; } const SizeType& sizeObject() const { return m_size; } const IncrType& incrObject() const { return m_incr; } protected: FirstType m_first; SizeType m_size; IncrType m_incr; public: #if EIGEN_HAS_CXX11 && ((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48) auto reverse() const -> decltype(Eigen::seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr)) { return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr); } #else protected: typedef typename internal::aseq_negate::type ReverseIncrType; typedef typename internal::aseq_reverse_first_type::type ReverseFirstType; public: ArithmeticSequence reverse() const { return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr); } #endif }; /** \returns an ArithmeticSequence starting at \a first, of length \a size, and increment \a incr * * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ template ArithmeticSequence::type,typename internal::cleanup_index_type::type,typename internal::cleanup_seq_incr::type > seqN(FirstType first, SizeType size, IncrType incr) { return ArithmeticSequence::type,typename internal::cleanup_index_type::type,typename internal::cleanup_seq_incr::type>(first,size,incr); } /** \returns an ArithmeticSequence starting at \a first, of length \a size, and unit increment * * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */ template ArithmeticSequence::type,typename internal::cleanup_index_type::type > seqN(FirstType first, SizeType size) { return ArithmeticSequence::type,typename internal::cleanup_index_type::type>(first,size); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and with positive (or negative) increment \a incr * * It is essentially an alias to: * \code * seqN(f, (l-f+incr)/incr, incr); * \endcode * * \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */ template auto 
seq(FirstType f, LastType l, IncrType incr); /** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and unit increment * * It is essentially an alias to: * \code * seqN(f,l-f+1); * \endcode * * \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ template auto seq(FirstType f, LastType l); #else // EIGEN_PARSED_BY_DOXYGEN #if EIGEN_HAS_CXX11 template auto seq(FirstType f, LastType l) -> decltype(seqN(typename internal::cleanup_index_type::type(f), ( typename internal::cleanup_index_type::type(l) - typename internal::cleanup_index_type::type(f)+fix<1>()))) { return seqN(typename internal::cleanup_index_type::type(f), (typename internal::cleanup_index_type::type(l) -typename internal::cleanup_index_type::type(f)+fix<1>())); } template auto seq(FirstType f, LastType l, IncrType incr) -> decltype(seqN(typename internal::cleanup_index_type::type(f), ( typename internal::cleanup_index_type::type(l) - typename internal::cleanup_index_type::type(f)+typename internal::cleanup_seq_incr::type(incr) ) / typename internal::cleanup_seq_incr::type(incr), typename internal::cleanup_seq_incr::type(incr))) { typedef typename internal::cleanup_seq_incr::type CleanedIncrType; return seqN(typename internal::cleanup_index_type::type(f), ( typename internal::cleanup_index_type::type(l) -typename internal::cleanup_index_type::type(f)+CleanedIncrType(incr)) / CleanedIncrType(incr), CleanedIncrType(incr)); } #else // EIGEN_HAS_CXX11 template typename internal::enable_if::value || symbolic::is_symbolic::value), ArithmeticSequence::type,Index> >::type seq(FirstType f, LastType l) { return seqN(typename internal::cleanup_index_type::type(f), Index((typename internal::cleanup_index_type::type(l)-typename internal::cleanup_index_type::type(f)+fix<1>()))); } template typename internal::enable_if::value, ArithmeticSequence,symbolic::ValueExpr<> >, symbolic::ValueExpr > > > >::type seq(const symbolic::BaseExpr &f, LastType l) { return seqN(f.derived(),(typename 
internal::cleanup_index_type::type(l)-f.derived()+fix<1>())); } template typename internal::enable_if::value, ArithmeticSequence::type, symbolic::AddExpr >, symbolic::ValueExpr > > > >::type seq(FirstType f, const symbolic::BaseExpr &l) { return seqN(typename internal::cleanup_index_type::type(f),(l.derived()-typename internal::cleanup_index_type::type(f)+fix<1>())); } template ArithmeticSequence >,symbolic::ValueExpr > > > seq(const symbolic::BaseExpr &f, const symbolic::BaseExpr &l) { return seqN(f.derived(),(l.derived()-f.derived()+fix<1>())); } template typename internal::enable_if::value || symbolic::is_symbolic::value), ArithmeticSequence::type,Index,typename internal::cleanup_seq_incr::type> >::type seq(FirstType f, LastType l, IncrType incr) { typedef typename internal::cleanup_seq_incr::type CleanedIncrType; return seqN(typename internal::cleanup_index_type::type(f), Index((typename internal::cleanup_index_type::type(l)-typename internal::cleanup_index_type::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr)), incr); } template typename internal::enable_if::value, ArithmeticSequence, symbolic::ValueExpr<> >, symbolic::ValueExpr::type> >, symbolic::ValueExpr::type> >, typename internal::cleanup_seq_incr::type> >::type seq(const symbolic::BaseExpr &f, LastType l, IncrType incr) { typedef typename internal::cleanup_seq_incr::type CleanedIncrType; return seqN(f.derived(),(typename internal::cleanup_index_type::type(l)-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr); } template typename internal::enable_if::value, ArithmeticSequence::type, symbolic::QuotientExpr >, symbolic::ValueExpr::type> >, symbolic::ValueExpr::type> >, typename internal::cleanup_seq_incr::type> >::type seq(FirstType f, const symbolic::BaseExpr &l, IncrType incr) { typedef typename internal::cleanup_seq_incr::type CleanedIncrType; return seqN(typename internal::cleanup_index_type::type(f), (l.derived()-typename 
internal::cleanup_index_type::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr), incr); } template ArithmeticSequence >, symbolic::ValueExpr::type> >, symbolic::ValueExpr::type> >, typename internal::cleanup_seq_incr::type> seq(const symbolic::BaseExpr &f, const symbolic::BaseExpr &l, IncrType incr) { typedef typename internal::cleanup_seq_incr::type CleanedIncrType; return seqN(f.derived(),(l.derived()-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr); } #endif // EIGEN_HAS_CXX11 #endif // EIGEN_PARSED_BY_DOXYGEN namespace placeholders { #if EIGEN_HAS_CXX11 || defined(EIGEN_PARSED_BY_DOXYGEN) /** \cpp11 * \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr. * * It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode * * \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */ template auto lastN(SizeType size, IncrType incr) -> decltype(seqN(Eigen::placeholders::last-(size-fix<1>())*incr, size, incr)) { return seqN(Eigen::placeholders::last-(size-fix<1>())*incr, size, incr); } /** \cpp11 * \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment. 
* * It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode * * \sa lastN(SizeType,IncrType, seqN(FirstType,SizeType), seq(FirstType,LastType) */ template auto lastN(SizeType size) -> decltype(seqN(Eigen::placeholders::last+fix<1>()-size, size)) { return seqN(Eigen::placeholders::last+fix<1>()-size, size); } #endif } // namespace placeholders namespace internal { // Convert a symbolic span into a usable one (i.e., remove last/end "keywords") template struct make_size_type { typedef typename internal::conditional::value, Index, T>::type type; }; template struct IndexedViewCompatibleType, XprSize> { typedef ArithmeticSequence::type,IncrType> type; }; template ArithmeticSequence::type,IncrType> makeIndexedViewCompatible(const ArithmeticSequence& ids, Index size,SpecializedType) { return ArithmeticSequence::type,IncrType>( eval_expr_given_size(ids.firstObject(),size),eval_expr_given_size(ids.sizeObject(),size),ids.incrObject()); } template struct get_compile_time_incr > { enum { value = get_fixed_value::value }; }; } // end namespace internal /** \namespace Eigen::indexing * \ingroup Core_Module * * The sole purpose of this namespace is to be able to import all functions * and symbols that are expected to be used within operator() for indexing * and slicing. If you already imported the whole Eigen namespace: * \code using namespace Eigen; \endcode * then you are already all set. 
Otherwise, if you don't want/cannot import * the whole Eigen namespace, the following line: * \code using namespace Eigen::indexing; \endcode * is equivalent to: * \code using Eigen::fix; using Eigen::seq; using Eigen::seqN; using Eigen::placeholders::all; using Eigen::placeholders::last; using Eigen::placeholders::lastN; // c++11 only using Eigen::placeholders::lastp1; \endcode */ namespace indexing { using Eigen::fix; using Eigen::seq; using Eigen::seqN; using Eigen::placeholders::all; using Eigen::placeholders::last; #if EIGEN_HAS_CXX11 using Eigen::placeholders::lastN; #endif using Eigen::placeholders::lastp1; } } // end namespace Eigen #endif // EIGEN_ARITHMETIC_SEQUENCE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Array.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAY_H #define EIGEN_ARRAY_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits > { typedef ArrayXpr XprKind; typedef ArrayBase > XprBase; }; } /** \class Array * \ingroup Core_Module * * \brief General-purpose arrays with easy API for coefficient-wise operations * * The %Array class is very similar to the Matrix class. It provides * general-purpose one- and two-dimensional arrays. The difference between the * %Array and the %Matrix class is primarily in the API: the API for the * %Array class provides easy access to coefficient-wise operations, while the * API for the %Matrix class provides easy access to linear-algebra * operations. 
* * See documentation of class Matrix for detailed information on the template parameters * storage layout. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. * * \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy */ template class Array : public PlainObjectBase > { public: typedef PlainObjectBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Array) enum { Options = Options_ }; typedef typename Base::PlainObject PlainObject; protected: template friend struct internal::conservative_resize_like_impl; using Base::m_storage; public: using Base::base; using Base::coeff; using Base::coeffRef; /** * The usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const EigenBase &other) { return Base::operator=(other); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ /* This overload is needed because the usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Scalar &value) { Base::setConstant(value); return *this; } /** Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. 
*/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const DenseBase& other) { return Base::_set(other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); } /** Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array() : Base() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ?? /** \internal */ EIGEN_DEVICE_FUNC Array(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC Array(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible::value) : Base(std::move(other)) { } EIGEN_DEVICE_FUNC Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) { Base::operator=(std::move(other)); return *this; } #endif #if EIGEN_HAS_CXX11 /** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) * * Example: \include Array_variadic_ctor_cxx11.cpp * Output: \verbinclude Array_variadic_ctor_cxx11.out * * \sa Array(const std::initializer_list>&) * \sa Array(const Scalar&), Array(const Scalar&,const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) : Base(a0, a1, a2, a3, args...) 
{} /** \brief Constructs an array and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11 * * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: * * Example: \include Array_initializer_list_23_cxx11.cpp * Output: \verbinclude Array_initializer_list_23_cxx11.out * * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered. * * In the case of a compile-time column 1D array, implicit transposition from a single row is allowed. * Therefore Array{{1,2,3,4,5}} is legal and the more verbose syntax * Array{{1},{2},{3},{4},{5}} can be avoided: * * Example: \include Array_initializer_list_vector_cxx11.cpp * Output: \verbinclude Array_initializer_list_vector_cxx11.out * * In the case of fixed-sized arrays, the initializer list sizes must exactly match the array sizes, * and implicit transposition is allowed for compile-time 1D arrays only. * * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const std::initializer_list>& list) : Base(list) {} #endif // end EIGEN_HAS_CXX11 #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(const T& x) { Base::template _init1(x); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { this->template _init2(val0, val1); } #else /** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */ EIGEN_DEVICE_FUNC explicit Array(const Scalar *data); /** Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Array() instead. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(Index dim); /** constructs an initialized 1x1 Array with the given coefficient * \sa const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args */ Array(const Scalar& value); /** constructs an uninitialized array with \a rows rows and \a cols columns. * * This is useful for dynamic-size arrays. For fixed-size arrays, * it is redundant to pass these parameters, so one should use the default constructor * Array() instead. */ Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ Array(const Scalar& val0, const Scalar& val1); #endif // end EIGEN_PARSED_BY_DOXYGEN /** constructs an initialized 3D vector with given coefficients * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; } /** constructs an initialized 4D vector with given coefficients * \sa Array(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... 
args) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; m_storage.data()[3] = val3; } /** Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Array& other) : Base(other) { } private: struct PrivateType {}; public: /** \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const EigenBase &other, typename internal::enable_if::value, PrivateType>::type = PrivateType()) : Base(other.derived()) { } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT{ return 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } #ifdef EIGEN_ARRAY_PLUGIN #include EIGEN_ARRAY_PLUGIN #endif private: template friend struct internal::matrix_swap_impl; }; /** \defgroup arraytypedefs Global array typedefs * \ingroup Core_Module * * %Eigen defines several typedef shortcuts for most common 1D and 2D array types. * * The general patterns are the following: * * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. * * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is * a fixed-size 1D array of 4 complex floats. * * With \cpp11, template alias are also defined for common sizes. 
* They follow the same pattern as above except that the scalar type suffix is replaced by a * template parameter, i.e.: * - `ArrayRowsCols` where `Rows` and `Cols` can be \c 2,\c 3,\c 4, or \c X for fixed or dynamic size. * - `ArraySize` where `Size` can be \c 2,\c 3,\c 4 or \c X for fixed or dynamic size 1D arrays. * * \sa class Array */ #define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup arraytypedefs */ \ typedef Array Array##SizeSuffix##SizeSuffix##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array Array##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup arraytypedefs */ \ typedef Array Array##Size##X##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array Array##X##Size##TypeSuffix; #define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cf) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cd) #undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_ARRAY_TYPEDEFS #undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS #if EIGEN_HAS_CXX11 #define EIGEN_MAKE_ARRAY_TYPEDEFS(Size, SizeSuffix) \ /** \ingroup arraytypedefs */ \ /** \brief \cpp11 */ \ template \ using Array##SizeSuffix##SizeSuffix = Array; \ /** \ingroup arraytypedefs */ \ /** \brief \cpp11 */ \ template \ using Array##SizeSuffix = Array; #define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Size) \ /** \ingroup arraytypedefs */ \ /** \brief \cpp11 */ \ template \ 
using Array##Size##X = Array; \ /** \ingroup arraytypedefs */ \ /** \brief \cpp11 */ \ template \ using Array##X##Size = Array; EIGEN_MAKE_ARRAY_TYPEDEFS(2, 2) EIGEN_MAKE_ARRAY_TYPEDEFS(3, 3) EIGEN_MAKE_ARRAY_TYPEDEFS(4, 4) EIGEN_MAKE_ARRAY_TYPEDEFS(Dynamic, X) EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(2) EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(3) EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(4) #undef EIGEN_MAKE_ARRAY_TYPEDEFS #undef EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS #endif // EIGEN_HAS_CXX11 #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ using Eigen::Matrix##SizeSuffix##TypeSuffix; \ using Eigen::Vector##SizeSuffix##TypeSuffix; \ using Eigen::RowVector##SizeSuffix##TypeSuffix; #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ #define EIGEN_USING_ARRAY_TYPEDEFS \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) } // end namespace Eigen #endif // EIGEN_ARRAY_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ArrayBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ARRAYBASE_H #define EIGEN_ARRAYBASE_H #include "./InternalHeaderCheck.h" namespace Eigen { template class MatrixWrapper; /** \class ArrayBase * \ingroup Core_Module * * \brief Base class for all 1D and 2D array, and related expressions * * An array is similar to a dense vector or matrix. While matrices are mathematical * objects with well defined linear algebra operators, an array is just a collection * of scalar values arranged in a one or two dimensional fashion. As the main consequence, * all operations applied to an array are performed coefficient wise. Furthermore, * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient * constructors allowing to easily write generic code working for both scalar values * and arrays. * * This class is the base that is inherited by all array expression types. * * \tparam Derived is the derived type, e.g., an array or an expression type. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. * * \sa class MatrixBase, \ref TopicClassHierarchy */ template class ArrayBase : public DenseBase { public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** The base class for a given storage type. 
*/ typedef ArrayBase StorageBaseType; typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::operator-; using Base::operator=; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Base::PlainObject PlainObject; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,PlainObject> ConstantReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/ArrayCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/ArrayCwiseBinaryOps.h" # ifdef EIGEN_ARRAYBASE_PLUGIN # include EIGEN_ARRAYBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_UNARY_ADDONS /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) { internal::call_assignment(derived(), 
other.derived()); return derived(); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar &value) { Base::setConstant(value); return derived(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& scalar); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& scalar); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase& other); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase& other); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase& other); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase& other); public: EIGEN_DEVICE_FUNC ArrayBase& array() { return *this; } EIGEN_DEVICE_FUNC const ArrayBase& array() const { return *this; } /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array * \sa MatrixBase::array() */ EIGEN_DEVICE_FUNC MatrixWrapper matrix() { return MatrixWrapper(derived()); } EIGEN_DEVICE_FUNC const MatrixWrapper matrix() const { return MatrixWrapper(derived()); } // template // inline void evalTo(Dest& dst) const { dst = matrix(); } protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase) private: explicit ArrayBase(Index); ArrayBase(Index,Index); template explicit ArrayBase(const ArrayBase&); protected: // mixing arrays and matrices is not legal template Derived& operator+=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template Derived& operator-=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /** replaces \c *this by \c *this - \a other. 
* * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase::operator-=(const ArrayBase &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op()); return derived(); } /** replaces \c *this by \c *this + \a other. * * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase::operator+=(const ArrayBase& other) { call_assignment(derived(), other.derived(), internal::add_assign_op()); return derived(); } /** replaces \c *this by \c *this * \a other coefficient wise. * * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase::operator*=(const ArrayBase& other) { call_assignment(derived(), other.derived(), internal::mul_assign_op()); return derived(); } /** replaces \c *this by \c *this / \a other coefficient wise. * * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase::operator/=(const ArrayBase& other) { call_assignment(derived(), other.derived(), internal::div_assign_op()); return derived(); } } // end namespace Eigen #endif // EIGEN_ARRAYBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ArrayWrapper.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ARRAYWRAPPER_H #define EIGEN_ARRAYWRAPPER_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class ArrayWrapper * \ingroup Core_Module * * \brief Expression of a mathematical vector or matrix as an array object * * This class is the return type of MatrixBase::array(), and most of the time * this is the only way it is use. * * \sa MatrixBase::array(), class MatrixWrapper */ namespace internal { template struct traits > : public traits::type > { typedef ArrayXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits::type >::Flags, LvalueBitFlag = is_lvalue::value ? LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template class ArrayWrapper : public ArrayBase > { public: typedef ArrayBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) typedef typename internal::remove_all::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return 
m_expression.coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } template EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { dst = m_expression; } EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } protected: NestedExpressionType m_expression; }; /** \class MatrixWrapper * \ingroup Core_Module * * \brief Expression of an array as a mathematical vector or matrix * * This class is the return type of ArrayBase::matrix(), and most of the time * this is the only way it is use. * * \sa MatrixBase::matrix(), class ArrayWrapper */ namespace internal { template struct traits > : public traits::type > { typedef MatrixXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits::type >::Flags, LvalueBitFlag = is_lvalue::value ? 
LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template class MatrixWrapper : public MatrixBase > { public: typedef MatrixBase > Base; EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) typedef typename internal::remove_all::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.derived().coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); 
} protected: NestedExpressionType m_expression; }; } // end namespace Eigen #endif // EIGEN_ARRAYWRAPPER_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Assign.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Michael Olbrich // Copyright (C) 2006-2010 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ASSIGN_H #define EIGEN_ASSIGN_H #include "./InternalHeaderCheck.h" namespace Eigen { template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase ::lazyAssign(const DenseBase& other) { enum{ SameType = internal::is_same::value }; EIGEN_STATIC_ASSERT_LVALUE(Derived) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(rows() == other.rows() && cols() == other.cols()); internal::call_assignment_no_alias(derived(),other.derived()); return derived(); } template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) { 
internal::call_assignment(derived(), other.derived()); return derived(); } template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const EigenBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue& other) { other.derived().evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_ASSIGN_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/AssignEvaluator.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob // Copyright (C) 2011-2014 Gael Guennebaud // Copyright (C) 2011-2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ASSIGN_EVALUATOR_H #define EIGEN_ASSIGN_EVALUATOR_H #include "./InternalHeaderCheck.h" namespace Eigen { // This implementation is based on Assign.h namespace internal { /*************************************************************************** * Part 1 : the logic deciding a strategy for traversal and unrolling * ***************************************************************************/ // copy_using_evaluator_traits is based on assign_traits template struct copy_using_evaluator_traits { typedef typename DstEvaluator::XprType Dst; typedef typename Dst::Scalar DstScalar; enum { DstFlags = DstEvaluator::Flags, SrcFlags = SrcEvaluator::Flags }; public: enum { DstAlignment = DstEvaluator::Alignment, SrcAlignment = SrcEvaluator::Alignment, DstHasDirectAccess = (DstFlags & DirectAccessBit) == DirectAccessBit, JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment) }; private: enum { InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) : int(DstFlags)&RowMajorBit ? int(Dst::ColsAtCompileTime) : int(Dst::RowsAtCompileTime), InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) : int(DstFlags)&RowMajorBit ? 
int(Dst::MaxColsAtCompileTime) : int(Dst::MaxRowsAtCompileTime), RestrictedInnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(InnerSize,MaxPacketSize), RestrictedLinearSize = EIGEN_SIZE_MIN_PREFER_FIXED(Dst::SizeAtCompileTime,MaxPacketSize), OuterStride = int(outer_stride_at_compile_time::ret), MaxSizeAtCompileTime = Dst::SizeAtCompileTime }; // TODO distinguish between linear traversal and inner-traversals typedef typename find_best_packet::type LinearPacketType; typedef typename find_best_packet::type InnerPacketType; enum { LinearPacketSize = unpacket_traits::size, InnerPacketSize = unpacket_traits::size }; public: enum { LinearRequiredAlignment = unpacket_traits::alignment, InnerRequiredAlignment = unpacket_traits::alignment }; private: enum { DstIsRowMajor = DstFlags&RowMajorBit, SrcIsRowMajor = SrcFlags&RowMajorBit, StorageOrdersAgree = (int(DstIsRowMajor) == int(SrcIsRowMajor)), MightVectorize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit) && bool(functor_traits::PacketAccess), MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(InnerPacketSize)==0 && int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0 && (EIGEN_UNALIGNED_VECTORIZE || int(JointAlignment)>=int(InnerRequiredAlignment)), MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit), MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize) && bool(DstHasDirectAccess) && (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic), /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, so it's only good for large enough sizes. 
*/ MaySliceVectorize = bool(MightVectorize) && bool(DstHasDirectAccess) && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=(EIGEN_UNALIGNED_VECTORIZE?InnerPacketSize:(3*InnerPacketSize))) /* slice vectorization can be slow, so we only want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block in a fixed-size matrix However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */ }; public: enum { Traversal = int(Dst::SizeAtCompileTime) == 0 ? int(AllAtOnceTraversal) // If compile-size is zero, traversing will fail at compile-time. : (int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize)) ? int(LinearVectorizedTraversal) : int(MayInnerVectorize) ? int(InnerVectorizedTraversal) : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) : int(MayLinearize) ? int(LinearTraversal) : int(DefaultTraversal), Vectorized = int(Traversal) == InnerVectorizedTraversal || int(Traversal) == LinearVectorizedTraversal || int(Traversal) == SliceVectorizedTraversal }; typedef typename conditional::type PacketType; private: enum { ActualPacketSize = int(Traversal)==LinearVectorizedTraversal ? LinearPacketSize : Vectorized ? InnerPacketSize : 1, UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize, MayUnrollCompletely = int(Dst::SizeAtCompileTime) != Dynamic && int(Dst::SizeAtCompileTime) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit), MayUnrollInner = int(InnerSize) != Dynamic && int(InnerSize) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit) }; public: enum { Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) ? ( int(MayUnrollCompletely) ? int(CompleteUnrolling) : int(MayUnrollInner) ? 
int(InnerUnrolling) : int(NoUnrolling) ) : int(Traversal) == int(LinearVectorizedTraversal) ? ( bool(MayUnrollCompletely) && ( EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment))) ? int(CompleteUnrolling) : int(NoUnrolling) ) : int(Traversal) == int(LinearTraversal) ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) #if EIGEN_UNALIGNED_VECTORIZE : int(Traversal) == int(SliceVectorizedTraversal) ? ( bool(MayUnrollInner) ? int(InnerUnrolling) : int(NoUnrolling) ) #endif : int(NoUnrolling) }; #ifdef EIGEN_DEBUG_ASSIGN static void debug() { std::cerr << "DstXpr: " << typeid(typename DstEvaluator::XprType).name() << std::endl; std::cerr << "SrcXpr: " << typeid(typename SrcEvaluator::XprType).name() << std::endl; std::cerr.setf(std::ios::hex, std::ios::basefield); std::cerr << "DstFlags" << " = " << DstFlags << " (" << demangle_flags(DstFlags) << " )" << std::endl; std::cerr << "SrcFlags" << " = " << SrcFlags << " (" << demangle_flags(SrcFlags) << " )" << std::endl; std::cerr.unsetf(std::ios::hex); EIGEN_DEBUG_VAR(DstAlignment) EIGEN_DEBUG_VAR(SrcAlignment) EIGEN_DEBUG_VAR(LinearRequiredAlignment) EIGEN_DEBUG_VAR(InnerRequiredAlignment) EIGEN_DEBUG_VAR(JointAlignment) EIGEN_DEBUG_VAR(InnerSize) EIGEN_DEBUG_VAR(InnerMaxSize) EIGEN_DEBUG_VAR(LinearPacketSize) EIGEN_DEBUG_VAR(InnerPacketSize) EIGEN_DEBUG_VAR(ActualPacketSize) EIGEN_DEBUG_VAR(StorageOrdersAgree) EIGEN_DEBUG_VAR(MightVectorize) EIGEN_DEBUG_VAR(MayLinearize) EIGEN_DEBUG_VAR(MayInnerVectorize) EIGEN_DEBUG_VAR(MayLinearVectorize) EIGEN_DEBUG_VAR(MaySliceVectorize) std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost) EIGEN_DEBUG_VAR(DstEvaluator::CoeffReadCost) EIGEN_DEBUG_VAR(Dst::SizeAtCompileTime) EIGEN_DEBUG_VAR(UnrollingLimit) EIGEN_DEBUG_VAR(MayUnrollCompletely) EIGEN_DEBUG_VAR(MayUnrollInner) std::cerr << "Unrolling" << " = " << Unrolling << " (" << 
demangle_unrolling(Unrolling) << ")" << std::endl; std::cerr << std::endl; } #endif }; /*************************************************************************** * Part 2 : meta-unrollers ***************************************************************************/ /************************ *** Default traversal *** ************************/ template struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling { // FIXME: this is not very clean, perhaps this information should be provided by the kernel? typedef typename Kernel::DstEvaluatorType DstEvaluatorType; typedef typename DstEvaluatorType::XprType DstXprType; enum { outer = Index / DstXprType::InnerSizeAtCompileTime, inner = Index % DstXprType::InnerSizeAtCompileTime }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { kernel.assignCoeffByOuterInner(outer, inner); copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); } }; template struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { kernel.assignCoeffByOuterInner(outer, Index_); copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); } }; template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { } }; /*********************** *** Linear traversal *** ***********************/ template struct copy_using_evaluator_LinearTraversal_CompleteUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { kernel.assignCoeff(Index); copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); } }; template struct copy_using_evaluator_LinearTraversal_CompleteUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; /************************** 
*** Inner vectorization *** **************************/ template struct copy_using_evaluator_innervec_CompleteUnrolling { // FIXME: this is not very clean, perhaps this information should be provided by the kernel? typedef typename Kernel::DstEvaluatorType DstEvaluatorType; typedef typename DstEvaluatorType::XprType DstXprType; typedef typename Kernel::PacketType PacketType; enum { outer = Index / DstXprType::InnerSizeAtCompileTime, inner = Index % DstXprType::InnerSizeAtCompileTime, SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, DstAlignment = Kernel::AssignmentTraits::DstAlignment }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { kernel.template assignPacketByOuterInner(outer, inner); enum { NextIndex = Index + unpacket_traits::size }; copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); } }; template struct copy_using_evaluator_innervec_CompleteUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; template struct copy_using_evaluator_innervec_InnerUnrolling { typedef typename Kernel::PacketType PacketType; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { kernel.template assignPacketByOuterInner(outer, Index_); enum { NextIndex = Index_ + unpacket_traits::size }; copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); } }; template struct copy_using_evaluator_innervec_InnerUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { } }; /*************************************************************************** * Part 3 : implementation of all cases ***************************************************************************/ // dense_assignment_loop is based on assign_impl template struct dense_assignment_loop; /************************ ***** Special Cases ***** ************************/ // Zero-sized assignment is a no-op. 
template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel& /*kernel*/) { EIGEN_STATIC_ASSERT(int(Kernel::DstEvaluatorType::XprType::SizeAtCompileTime) == 0, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT) } }; /************************ *** Default traversal *** ************************/ template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel &kernel) { for(Index outer = 0; outer < kernel.outerSize(); ++outer) { for(Index inner = 0; inner < kernel.innerSize(); ++inner) { kernel.assignCoeffByOuterInner(outer, inner); } } } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; const Index outerSize = kernel.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); } }; /*************************** *** Linear vectorization *** ***************************/ // The goal of unaligned_dense_assignment_loop is simply to factorize the handling // of the non vectorizable beginning and ending parts template struct unaligned_dense_assignment_loop { // if IsAligned = true, then do nothing template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {} }; template <> struct unaligned_dense_assignment_loop { // MSVC must not inline this functions. If it does, it fails to optimize the // packet access path. 
// FIXME check which version exhibits this issue #if EIGEN_COMP_MSVC template static EIGEN_DONT_INLINE void run(Kernel &kernel, Index start, Index end) #else template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index start, Index end) #endif { for (Index index = start; index < end; ++index) kernel.assignCoeff(index); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { const Index size = kernel.size(); typedef typename Kernel::Scalar Scalar; typedef typename Kernel::PacketType PacketType; enum { requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment, packetSize = unpacket_traits::size, dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment), dstAlignment = packet_traits::AlignedOnScalar ? int(requestedAlignment) : int(Kernel::AssignmentTraits::DstAlignment), srcAlignment = Kernel::AssignmentTraits::JointAlignment }; const Index alignedStart = dstIsAligned ? 
0 : internal::first_aligned(kernel.dstDataPtr(), size); const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; unaligned_dense_assignment_loop::run(kernel, 0, alignedStart); for(Index index = alignedStart; index < alignedEnd; index += packetSize) kernel.template assignPacket(index); unaligned_dense_assignment_loop<>::run(kernel, alignedEnd, size); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; typedef typename Kernel::PacketType PacketType; enum { size = DstXprType::SizeAtCompileTime, packetSize =unpacket_traits::size, alignedSize = (int(size)/packetSize)*packetSize }; copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); copy_using_evaluator_DefaultTraversal_CompleteUnrolling::run(kernel); } }; /************************** *** Inner vectorization *** **************************/ template struct dense_assignment_loop { typedef typename Kernel::PacketType PacketType; enum { SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, DstAlignment = Kernel::AssignmentTraits::DstAlignment }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { const Index innerSize = kernel.innerSize(); const Index outerSize = kernel.outerSize(); const Index packetSize = unpacket_traits::size; for(Index outer = 0; outer < outerSize; ++outer) for(Index inner = 0; inner < innerSize; inner+=packetSize) kernel.template assignPacketByOuterInner(outer, inner); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; copy_using_evaluator_innervec_CompleteUnrolling::run(kernel); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; typedef typename 
Kernel::AssignmentTraits Traits; const Index outerSize = kernel.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); } }; /*********************** *** Linear traversal *** ***********************/ template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { const Index size = kernel.size(); for(Index i = 0; i < size; ++i) kernel.assignCoeff(i); } }; template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; copy_using_evaluator_LinearTraversal_CompleteUnrolling::run(kernel); } }; /************************** *** Slice vectorization *** ***************************/ template struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::Scalar Scalar; typedef typename Kernel::PacketType PacketType; enum { packetSize = unpacket_traits::size, requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment), alignable = packet_traits::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment)>=sizeof(Scalar), dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment), dstAlignment = alignable ? int(requestedAlignment) : int(Kernel::AssignmentTraits::DstAlignment) }; const Scalar *dst_ptr = kernel.dstDataPtr(); if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0) { // the pointer is not aligned-on scalar, so alignment is not possible return dense_assignment_loop::run(kernel); } const Index packetAlignedMask = packetSize - 1; const Index innerSize = kernel.innerSize(); const Index outerSize = kernel.outerSize(); const Index alignedStep = alignable ? (packetSize - kernel.outerStride() % packetSize) & packetAlignedMask : 0; Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 
0 : internal::first_aligned(dst_ptr, innerSize); for(Index outer = 0; outer < outerSize; ++outer) { const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); // do the non-vectorizable part of the assignment for(Index inner = 0; inner(outer, inner); // do the non-vectorizable part of the assignment for(Index inner = alignedEnd; inner struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; typedef typename Kernel::PacketType PacketType; enum { innerSize = DstXprType::InnerSizeAtCompileTime, packetSize =unpacket_traits::size, vectorizableSize = (int(innerSize) / int(packetSize)) * int(packetSize), size = DstXprType::SizeAtCompileTime }; for(Index outer = 0; outer < kernel.outerSize(); ++outer) { copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); } } }; #endif /*************************************************************************** * Part 4 : Generic dense assignment kernel ***************************************************************************/ // This class generalize the assignment of a coefficient (or packet) from one dense evaluator // to another dense writable evaluator. // It is parametrized by the two evaluators, and the actual assignment functor. // This abstraction level permits to keep the evaluation loops as simple and as generic as possible. // One can customize the assignment using this generic dense_assignment_kernel with different // functors, or by completely overloading it, by-passing a functor. 
template class generic_dense_assignment_kernel { protected: typedef typename DstEvaluatorTypeT::XprType DstXprType; typedef typename SrcEvaluatorTypeT::XprType SrcXprType; public: typedef DstEvaluatorTypeT DstEvaluatorType; typedef SrcEvaluatorTypeT SrcEvaluatorType; typedef typename DstEvaluatorType::Scalar Scalar; typedef copy_using_evaluator_traits AssignmentTraits; typedef typename AssignmentTraits::PacketType PacketType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr) { #ifdef EIGEN_DEBUG_ASSIGN AssignmentTraits::debug(); #endif } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_dstExpr.size(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const EIGEN_NOEXCEPT { return m_dstExpr.innerSize(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const EIGEN_NOEXCEPT { return m_dstExpr.outerSize(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dstExpr.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_dstExpr.cols(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return m_dstExpr.outerStride(); } EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() EIGEN_NOEXCEPT { return m_dst; } EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const EIGEN_NOEXCEPT { return m_src; } /// Assign src(row,col) to dst(row,col) through the assignment functor. 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col) { m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col)); } /// \sa assignCoeff(Index,Index) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index) { m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index)); } /// \sa assignCoeff(Index,Index) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner) { Index row = rowIndexByOuterInner(outer, inner); Index col = colIndexByOuterInner(outer, inner); assignCoeff(row, col); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col) { m_functor.template assignPacket(&m_dst.coeffRef(row,col), m_src.template packet(row,col)); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index) { m_functor.template assignPacket(&m_dst.coeffRef(index), m_src.template packet(index)); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner) { Index row = rowIndexByOuterInner(outer, inner); Index col = colIndexByOuterInner(outer, inner); assignPacket(row, col); } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) { typedef typename DstEvaluatorType::ExpressionTraits Traits; return int(Traits::RowsAtCompileTime) == 1 ? 0 : int(Traits::ColsAtCompileTime) == 1 ? inner : int(DstEvaluatorType::Flags)&RowMajorBit ? outer : inner; } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) { typedef typename DstEvaluatorType::ExpressionTraits Traits; return int(Traits::ColsAtCompileTime) == 1 ? 0 : int(Traits::RowsAtCompileTime) == 1 ? inner : int(DstEvaluatorType::Flags)&RowMajorBit ? 
inner : outer; } EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const { return m_dstExpr.data(); } protected: DstEvaluatorType& m_dst; const SrcEvaluatorType& m_src; const Functor &m_functor; // TODO find a way to avoid the needs of the original expression DstXprType& m_dstExpr; }; // Special kernel used when computing small products whose operands have dynamic dimensions. It ensures that the // PacketSize used is no larger than 4, thereby increasing the chance that vectorized instructions will be used // when computing the product. template class restricted_packet_dense_assignment_kernel : public generic_dense_assignment_kernel { protected: typedef generic_dense_assignment_kernel Base; public: typedef typename Base::Scalar Scalar; typedef typename Base::DstXprType DstXprType; typedef copy_using_evaluator_traits AssignmentTraits; typedef typename AssignmentTraits::PacketType PacketType; EIGEN_DEVICE_FUNC restricted_packet_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr) : Base(dst, src, func, dstExpr) { } }; /*************************************************************************** * Part 5 : Entry point for dense rectangular assignment ***************************************************************************/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const Functor &/*func*/) { EIGEN_ONLY_USED_FOR_DEBUG(dst); EIGEN_ONLY_USED_FOR_DEBUG(src); eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const internal::assign_op &/*func*/) { Index dstRows = src.rows(); Index dstCols = src.cols(); if(((dst.rows()!=dstRows) || (dst.cols()!=dstCols))) dst.resize(dstRows, dstCols); eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void 
call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func) { typedef evaluator DstEvaluatorType; typedef evaluator SrcEvaluatorType; SrcEvaluatorType srcEvaluator(src); // NOTE To properly handle A = (A*A.transpose())/s with A rectangular, // we need to resize the destination after the source evaluator has been created. resize_if_allowed(dst, src, func); DstEvaluatorType dstEvaluator(dst); typedef generic_dense_assignment_kernel Kernel; Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); dense_assignment_loop::run(kernel); } // Specialization for filling the destination with a constant value. #ifndef EIGEN_GPU_COMPILE_PHASE template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const Eigen::CwiseNullaryOp, DstXprType>& src, const internal::assign_op& func) { resize_if_allowed(dst, src, func); std::fill_n(dst.data(), dst.size(), src.functor()()); } #endif template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src) { call_dense_assignment_loop(dst, src, internal::assign_op()); } /*************************************************************************** * Part 6 : Generic assignment ***************************************************************************/ // Based on the respective shapes of the destination and source, // the class AssignmentKind determine the kind of assignment mechanism. // AssignmentKind must define a Kind typedef. 
template struct AssignmentKind; // Assignment kind defined in this file: struct Dense2Dense {}; struct EigenBase2EigenBase {}; template struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; template<> struct AssignmentKind { typedef Dense2Dense Kind; }; // This is the main assignment class template< typename DstXprType, typename SrcXprType, typename Functor, typename Kind = typename AssignmentKind< typename evaluator_traits::Shape , typename evaluator_traits::Shape >::Kind, typename EnableIf = void> struct Assignment; // The only purpose of this call_assignment() function is to deal with noalias() / "assume-aliasing" and automatic transposition. // Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes thing quite complicated. // So this intermediate function removes everything related to "assume-aliasing" such that Assignment // does not has to bother about these annoying details. template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(Dst& dst, const Src& src) { call_assignment(dst, src, internal::assign_op()); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(const Dst& dst, const Src& src) { call_assignment(dst, src, internal::assign_op()); } // Deal with "assume-aliasing" template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing::value, void*>::type = 0) { typename plain_matrix_type::type tmp(src); call_assignment_no_alias(dst, tmp, func); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if::value, void*>::type = 0) { call_assignment_no_alias(dst, src, func); } // by-pass "assume-aliasing" // When there is no aliasing, we require that 'dst' has been properly resized template class StorageBase, typename Src, typename Func> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(NoAlias& dst, const Src& src, 
const Func& func) { call_assignment_no_alias(dst.expression(), src, func); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias(Dst& dst, const Src& src, const Func& func) { enum { NeedToTranspose = ( (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1) ) && int(Dst::SizeAtCompileTime) != 1 }; typedef typename internal::conditional, Dst>::type ActualDstTypeCleaned; typedef typename internal::conditional, Dst&>::type ActualDstType; ActualDstType actualDst(dst); // TODO check whether this is the right place to perform these checks: EIGEN_STATIC_ASSERT_LVALUE(Dst) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src) EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar); Assignment::run(actualDst, src, func); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_restricted_packet_assignment_no_alias(Dst& dst, const Src& src, const Func& func) { typedef evaluator DstEvaluatorType; typedef evaluator SrcEvaluatorType; typedef restricted_packet_dense_assignment_kernel Kernel; EIGEN_STATIC_ASSERT_LVALUE(Dst) EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar); SrcEvaluatorType srcEvaluator(src); resize_if_allowed(dst, src, func); DstEvaluatorType dstEvaluator(dst); Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); dense_assignment_loop::run(kernel); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias(Dst& dst, const Src& src) { call_assignment_no_alias(dst, src, internal::assign_op()); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func) { // TODO check whether this is the right place to perform these checks: EIGEN_STATIC_ASSERT_LVALUE(Dst) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src) EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename 
Dst::Scalar,typename Src::Scalar); Assignment::run(dst, src, func); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src) { call_assignment_no_alias_no_transpose(dst, src, internal::assign_op()); } // forward declaration template void check_for_aliasing(const Dst &dst, const Src &src); // Generic Dense to Dense assignment // Note that the last template argument "Weak" is needed to make it possible to perform // both partial specialization+SFINAE without ambiguous specialization template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak> struct Assignment { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { #ifndef EIGEN_NO_DEBUG internal::check_for_aliasing(dst, src); #endif call_dense_assignment_loop(dst, src, func); } }; // Generic assignment through evalTo. // TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism. // Note that the last template argument "Weak" is needed to make it possible to perform // both partial specialization+SFINAE without ambiguous specialization template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak> struct Assignment { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); src.evalTo(dst); } // NOTE The following two functions are templated to avoid their instantiation if not needed // This is needed because some expressions supports evalTo only and/or have 'void' as scalar type. 
template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &/*func*/) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); src.addTo(dst); } template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &/*func*/) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); src.subTo(dst); } }; } // namespace internal } // end namespace Eigen #endif // EIGEN_ASSIGN_EVALUATOR_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Assign_MKL.h ================================================ /* Copyright (c) 2011, Intel Corporation. All rights reserved. Copyright (C) 2015 Gael Guennebaud Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin() ******************************************************************************** */ #ifndef EIGEN_ASSIGN_VML_H #define EIGEN_ASSIGN_VML_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template class vml_assign_traits { private: enum { DstHasDirectAccess = Dst::Flags & DirectAccessBit, SrcHasDirectAccess = Src::Flags & DirectAccessBit, StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)), InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime) : int(Dst::RowsAtCompileTime), InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) : int(Dst::Flags)&RowMajorBit ? 
int(Dst::MaxColsAtCompileTime) : int(Dst::MaxRowsAtCompileTime), MaxSizeAtCompileTime = Dst::SizeAtCompileTime, MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1, MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit), VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize, LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD }; public: enum { EnableVml = MightEnableVml && LargeEnough, Traversal = MightLinearize ? LinearTraversal : DefaultTraversal }; }; #define EIGEN_PP_EXPAND(ARG) ARG #if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1) #define EIGEN_VMLMODE_EXPAND_xLA , VML_HA #else #define EIGEN_VMLMODE_EXPAND_xLA , VML_LA #endif #define EIGEN_VMLMODE_EXPAND_x_ #define EIGEN_VMLMODE_PREFIX_xLA vm #define EIGEN_VMLMODE_PREFIX_x_ v #define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_x,VMLMODE) #define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \ template< typename DstXprType, typename SrcXprNested> \ struct Assignment, SrcXprNested>, assign_op, \ Dense2Dense, typename enable_if::EnableVml>::type> { \ typedef CwiseUnaryOp, SrcXprNested> SrcXprType; \ static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ resize_if_allowed(dst, src, func); \ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ if(vml_assign_traits::Traversal==LinearTraversal) { \ VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(), \ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \ } else { \ const Index outerSize = dst.outerSize(); \ for(Index outer = 0; outer < outerSize; ++outer) { \ const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) : \ &(src.nestedExpression().coeffRef(0, outer)); \ EIGENTYPE *dst_ptr = dst.IsRowMajor ? 
&(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \ VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, \ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ } \ } \ } \ }; \ #define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),s##VMLOP), float, float, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),d##VMLOP), double, double, VMLMODE) #define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),c##VMLOP), scomplex, MKL_Complex8, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),z##VMLOP), dcomplex, MKL_Complex16, VMLMODE) #define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE) \ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin, Sin, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin, Asin, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh, Sinh, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos, Cos, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos, Acos, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh, Cosh, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan, Tan, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan, Atan, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh, Tanh, LA) // EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp, Exp, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log, Ln, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA) EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt, Sqrt, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor, _) EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil, Ceil, _) 
#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE) \ template< typename DstXprType, typename SrcXprNested, typename Plain> \ struct Assignment, SrcXprNested, \ const CwiseNullaryOp,Plain> >, assign_op, \ Dense2Dense, typename enable_if::EnableVml>::type> { \ typedef CwiseBinaryOp, SrcXprNested, \ const CwiseNullaryOp,Plain> > SrcXprType; \ static void run(DstXprType &dst, const SrcXprType &src, const assign_op &func) { \ resize_if_allowed(dst, src, func); \ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); \ VMLTYPE exponent = reinterpret_cast(src.rhs().functor().m_other); \ if(vml_assign_traits::Traversal==LinearTraversal) \ { \ VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent, \ (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE) ); \ } else { \ const Index outerSize = dst.outerSize(); \ for(Index outer = 0; outer < outerSize; ++outer) { \ const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.lhs().coeffRef(outer,0)) : \ &(src.lhs().coeffRef(0, outer)); \ EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); \ VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent, \ (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_x##VMLMODE)); \ } \ } \ } \ }; EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float, float, LA) EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double, double, LA) EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8, LA) EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA) } // end namespace internal } // end namespace Eigen #endif // EIGEN_ASSIGN_VML_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/BandMatrix.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BANDMATRIX_H #define EIGEN_BANDMATRIX_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template class BandMatrixBase : public EigenBase { public: enum { Flags = internal::traits::Flags, CoeffReadCost = internal::traits::CoeffReadCost, RowsAtCompileTime = internal::traits::RowsAtCompileTime, ColsAtCompileTime = internal::traits::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, Supers = internal::traits::Supers, Subs = internal::traits::Subs, Options = internal::traits::Options }; typedef typename internal::traits::Scalar Scalar; typedef Matrix DenseMatrixType; typedef typename DenseMatrixType::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; typedef EigenBase Base; protected: enum { DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic, SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) }; public: using Base::derived; using Base::rows; using Base::cols; /** \returns the number of super diagonals */ inline Index supers() const { return derived().supers(); } /** \returns the number of sub diagonals */ inline Index subs() const { return derived().subs(); } /** \returns an expression of the underlying coefficient matrix */ inline const CoefficientsType& coeffs() const { return derived().coeffs(); } /** \returns an expression of the underlying coefficient matrix */ inline CoefficientsType& coeffs() { return derived().coeffs(); } /** \returns a vector expression of the \a i -th column, * only the meaningful part is returned. 
* \warning the internal storage must be column major. */ inline Block col(Index i) { EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); Index start = 0; Index len = coeffs().rows(); if (i<=supers()) { start = supers()-i; len = (std::min)(rows(),std::max(0,coeffs().rows() - (supers()-i))); } else if (i>=rows()-subs()) len = std::max(0,coeffs().rows() - (i + 1 - rows() + subs())); return Block(coeffs(), start, i, len, 1); } /** \returns a vector expression of the main diagonal */ inline Block diagonal() { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } /** \returns a vector expression of the main diagonal (const version) */ inline const Block diagonal() const { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } template struct DiagonalIntReturnType { enum { ReturnOpposite = (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)), Conjugate = ReturnOpposite && NumTraits::IsComplex, ActualIndex = ReturnOpposite ? -Index : Index, DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) ? Dynamic : (ActualIndex<0 ? 
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) }; typedef Block BuildType; typedef typename internal::conditional,BuildType >, BuildType>::type Type; }; /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline typename DiagonalIntReturnType::Type diagonal() { return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline const typename DiagonalIntReturnType::Type diagonal() const { return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline Block diagonal(Index i) { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline const Block diagonal(Index i) const { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); } template inline void evalTo(Dest& dst) const { dst.resize(rows(),cols()); dst.setZero(); dst.diagonal() = diagonal(); for (Index i=1; i<=supers();++i) dst.diagonal(i) = diagonal(i); for (Index i=1; i<=subs();++i) dst.diagonal(-i) = diagonal(-i); } DenseMatrixType toDenseMatrix() const { DenseMatrixType res(rows(),cols()); evalTo(res); return res; } protected: inline Index diagonalLength(Index i) const { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } }; /** * \class BandMatrix * \ingroup Core_Module * * \brief Represents a rectangular matrix with a banded storage * * \tparam Scalar_ Numeric type, i.e. 
float, double, int * \tparam Rows_ Number of rows, or \b Dynamic * \tparam Cols_ Number of columns, or \b Dynamic * \tparam Supers_ Number of super diagonal * \tparam Subs_ Number of sub diagonal * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint * The former controls \ref TopicStorageOrders "storage order", and defaults to * column-major. The latter controls whether the matrix represents a selfadjoint * matrix in which case either Supers of Subs have to be null. * * \sa class TridiagonalMatrix */ template struct traits > { typedef Scalar_ Scalar; typedef Dense StorageKind; typedef Eigen::Index StorageIndex; enum { CoeffReadCost = NumTraits::ReadCost, RowsAtCompileTime = Rows_, ColsAtCompileTime = Cols_, MaxRowsAtCompileTime = Rows_, MaxColsAtCompileTime = Cols_, Flags = LvalueBit, Supers = Supers_, Subs = Subs_, Options = Options_, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic }; typedef Matrix CoefficientsType; }; template class BandMatrix : public BandMatrixBase > { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) : m_coeffs(1+supers+subs,cols), m_rows(rows), m_supers(supers), m_subs(subs) { } /** \returns the number of columns */ inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } /** \returns the number of rows */ inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } inline CoefficientsType& coeffs() { return 
m_coeffs; } protected: CoefficientsType m_coeffs; internal::variable_if_dynamic m_rows; internal::variable_if_dynamic m_supers; internal::variable_if_dynamic m_subs; }; template class BandMatrixWrapper; template struct traits > { typedef typename _CoefficientsType::Scalar Scalar; typedef typename _CoefficientsType::StorageKind StorageKind; typedef typename _CoefficientsType::StorageIndex StorageIndex; enum { CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, RowsAtCompileTime = Rows_, ColsAtCompileTime = Cols_, MaxRowsAtCompileTime = Rows_, MaxColsAtCompileTime = Cols_, Flags = LvalueBit, Supers = Supers_, Subs = Subs_, Options = Options_, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic }; typedef _CoefficientsType CoefficientsType; }; template class BandMatrixWrapper : public BandMatrixBase > { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::CoefficientsType CoefficientsType; typedef typename internal::traits::StorageIndex StorageIndex; explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=Rows_, Index cols=Cols_, Index supers=Supers_, Index subs=Subs_) : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) { EIGEN_UNUSED_VARIABLE(cols); //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); } /** \returns the number of columns */ inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } /** \returns the number of rows */ inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } protected: const CoefficientsType& m_coeffs; internal::variable_if_dynamic m_rows; 
internal::variable_if_dynamic m_supers; internal::variable_if_dynamic m_subs; }; /** * \class TridiagonalMatrix * \ingroup Core_Module * * \brief Represents a tridiagonal matrix with a compact banded storage * * \tparam Scalar Numeric type, i.e. float, double, int * \tparam Size Number of rows and cols, or \b Dynamic * \tparam Options Can be 0 or \b SelfAdjoint * * \sa class BandMatrix */ template class TridiagonalMatrix : public BandMatrix { typedef BandMatrix Base; typedef typename Base::StorageIndex StorageIndex; public: explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } inline const typename Base::template DiagonalIntReturnType<1>::Type super() const { return Base::template diagonal<1>(); } inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); } inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const { return Base::template diagonal<-1>(); } protected: }; struct BandShape {}; template struct evaluator_traits > : public evaluator_traits_base > { typedef BandShape Shape; }; template struct evaluator_traits > : public evaluator_traits_base > { typedef BandShape Shape; }; template<> struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BANDMATRIX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Block.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BLOCK_H #define EIGEN_BLOCK_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef typename traits::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; typedef typename ref_selector::type XprTypeNested; typedef typename remove_reference::type _XprTypeNested; enum{ MatrixRows = traits::RowsAtCompileTime, MatrixCols = traits::ColsAtCompileTime, RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, MaxRowsAtCompileTime = BlockRows==0 ? 0 : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : int(traits::MaxRowsAtCompileTime), MaxColsAtCompileTime = BlockCols==0 ? 0 : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : int(traits::MaxColsAtCompileTime), XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) : int(outer_stride_at_compile_time::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time::ret) : int(inner_stride_at_compile_time::ret), // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, FlagsRowMajorBit = IsRowMajor ? 
RowMajorBit : 0, Flags = (traits::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit, // FIXME DirectAccessBit should not be handled by expressions // // Alignment is needed by MapBase's assertions // We can sefely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator Alignment = 0 }; }; template::ret> class BlockImpl_dense; } // end namespace internal template class BlockImpl; /** \class Block * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size block * * \tparam XprType the type of the expression in which we are taking a block * \tparam BlockRows the number of rows of the block we are taking at compile time (optional) * \tparam BlockCols the number of columns of the block we are taking at compile time (optional) * \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or * to set of columns of a column major matrix (optional). The parameter allows to determine * at compile time whether aligned access is possible on the block expression. * * This class represents an expression of either a fixed-size or dynamic-size block. It is the return * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block(Index,Index) and * most of the time this is the only way it is used. * * However, if you want to directly maniputate block expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. * * Here is an example illustrating the dynamic case: * \include class_Block.cpp * Output: \verbinclude class_Block.out * * \note Even though this expression has dynamic size, in the case where \a XprType * has fixed size, this expression inherits a fixed maximal size which means that evaluating * it does not cause a dynamic memory allocation. 
* * Here is an example illustrating the fixed-size case: * \include class_FixedBlock.cpp * Output: \verbinclude class_FixedBlock.out * * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock */ template class Block : public BlockImpl::StorageKind> { typedef BlockImpl::StorageKind> Impl; public: //typedef typename Impl::Base Base; typedef Impl Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Block) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) typedef typename internal::remove_all::type NestedExpression; /** Column or Row constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index i) : Impl(xpr,i) { eigen_assert( (i>=0) && ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols()); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) { eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols); } }; // The generic default implementation for dense block simplu forward to the internal::BlockImpl_dense // that must be specialized for direct and non-direct access... 
template class BlockImpl : public internal::BlockImpl_dense { typedef internal::BlockImpl_dense Impl; typedef typename XprType::StorageIndex StorageIndex; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) {} }; namespace internal { /** \internal Internal implementation of dense Blocks in the general case. */ template class BlockImpl_dense : public internal::dense_xpr_base >::type { typedef Block BlockType; typedef typename internal::ref_selector::non_const_type XprTypeNested; public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) // class InnerIterator; // FIXME apparently never used /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) : m_xpr(xpr), // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, // all other cases are invalid. // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) {} /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {} /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); } template EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const { return m_xpr.template packet(rowId + m_startRow.value(), colId + m_startCol.value()); } template EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_xpr.template writePacket(rowId + m_startRow.value(), colId + m_startCol.value(), val); } template EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const { return m_xpr.template packet (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } template EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val) { m_xpr.template writePacket (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \sa MapBase::data() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const; EIGEN_DEVICE_FUNC inline Index innerStride() const; EIGEN_DEVICE_FUNC inline Index outerStride() const; #endif EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startRow() const EIGEN_NOEXCEPT { return m_startRow.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startCol() const EIGEN_NOEXCEPT { return m_startCol.value(); } protected: XprTypeNested m_xpr; const internal::variable_if_dynamic m_startRow; const internal::variable_if_dynamic m_startCol; const internal::variable_if_dynamic m_blockRows; const internal::variable_if_dynamic m_blockCols; }; /** \internal Internal implementation of dense Blocks in the direct access case.*/ template class BlockImpl_dense : public MapBase > { typedef Block BlockType; typedef typename internal::ref_selector::non_const_type 
XprTypeNested; enum { XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0 }; public: typedef MapBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) /** Column or Row constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index i) : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()), BlockRows==1 ? 1 : xpr.rows(), BlockCols==1 ? 1 : xpr.cols()), m_xpr(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0) { init(); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::remove_all::type& nestedExpression() const EIGEN_NOEXCEPT { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE XprType& nestedExpression() { return m_xpr; } /** \sa MapBase::innerStride() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index innerStride() const EIGEN_NOEXCEPT { return internal::traits::HasSameStorageOrderAsXprType ? 
m_xpr.innerStride() : m_xpr.outerStride(); } /** \sa MapBase::outerStride() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return internal::traits::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startRow() const EIGEN_NOEXCEPT { return m_startRow.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR StorageIndex startCol() const EIGEN_NOEXCEPT { return m_startCol.value(); } #ifndef __SUNPRO_CC // FIXME sunstudio is not friendly with the above friend... // META-FIXME there is no 'friend' keyword around here. Is this obsolete? protected: #endif #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal used by allowAligned() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) : Base(data, blockRows, blockCols), m_xpr(xpr) { init(); } #endif protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void init() { m_outerStride = internal::traits::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride(); } XprTypeNested m_xpr; const internal::variable_if_dynamic m_startRow; const internal::variable_if_dynamic m_startCol; Index m_outerStride; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/BooleanRedux.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct all_unroller { enum { col = (UnrollCount-1) / Rows, row = (UnrollCount-1) % Rows }; EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat) { return all_unroller::run(mat) && mat.coeff(row, col); } }; template struct all_unroller { EIGEN_DEVICE_FUNC static inline bool run(const Derived &/*mat*/) { return true; } }; template struct all_unroller { EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; } }; template struct any_unroller { enum { col = (UnrollCount-1) / Rows, row = (UnrollCount-1) % Rows }; EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat) { return any_unroller::run(mat) || mat.coeff(row, col); } }; template struct any_unroller { EIGEN_DEVICE_FUNC static inline bool run(const Derived & /*mat*/) { return false; } }; template struct any_unroller { EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; } }; } // end namespace internal /** \returns true if all coefficients are true * * Example: \include MatrixBase_all.cpp * Output: \verbinclude MatrixBase_all.out * * \sa any(), Cwise::operator<() */ template EIGEN_DEVICE_FUNC inline bool DenseBase::all() const { typedef internal::evaluator Evaluator; enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + int(NumTraits::AddCost)) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::all_unroller::RowsAtCompileTime>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (!evaluator.coeff(i, j)) return false; return true; } } /** \returns true if at least one coefficient is true * * \sa all() */ template EIGEN_DEVICE_FUNC inline bool DenseBase::any() const { typedef internal::evaluator Evaluator; enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (int(Evaluator::CoeffReadCost) + 
int(NumTraits::AddCost)) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::any_unroller::RowsAtCompileTime>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (evaluator.coeff(i, j)) return true; return false; } } /** \returns the number of coefficients which evaluate to true * * \sa all(), any() */ template EIGEN_DEVICE_FUNC inline Eigen::Index DenseBase::count() const { return derived().template cast().template cast().sum(); } /** \returns true is \c *this contains at least one Not A Number (NaN). * * \sa allFinite() */ template inline bool DenseBase::hasNaN() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isNaN().any(); #else return !((derived().array()==derived().array()).all()); #endif } /** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values. * * \sa hasNaN() */ template inline bool DenseBase::allFinite() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isFinite().all(); #else return !((derived()-derived()).hasNaN()); #endif } } // end namespace Eigen #endif // EIGEN_ALLANDANY_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CommaInitializer.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_COMMAINITIALIZER_H #define EIGEN_COMMAINITIALIZER_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class CommaInitializer * \ingroup Core_Module * * \brief Helper class used by the comma initializer operator * * This class is internally used to implement the comma initializer feature. It is * the return type of MatrixBase::operator<<, and most of the time this is the only * way it is used. * * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() */ template struct CommaInitializer { typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 && "Cannot comma-initialize a 0x0 matrix (operator<<)"); m_xpr.coeffRef(0,0) = s; } template EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase& other) : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) { eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() && "Cannot comma-initialize a 0x0 matrix (operator<<)"); m_xpr.template block(0, 0, other.rows(), other.cols()) = other; } /* Copy/Move constructor which transfers ownership. This is crucial in * absence of return value optimization to avoid assertions during destruction. */ // FIXME in C++11 mode this could be replaced by a proper RValue constructor EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o) : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { // Mark original object as finished. 
In absence of R-value references we need to const_cast: const_cast(o).m_row = m_xpr.rows(); const_cast(o).m_col = m_xpr.cols(); const_cast(o).m_currentBlockRows = 0; } /* inserts a scalar value in the target matrix */ EIGEN_DEVICE_FUNC CommaInitializer& operator,(const Scalar& s) { if (m_col==m_xpr.cols()) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = 1; eigen_assert(m_row EIGEN_DEVICE_FUNC CommaInitializer& operator,(const DenseBase& other) { if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows)) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = other.rows(); eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert((m_col + other.cols() <= m_xpr.cols()) && "Too many coefficients passed to comma initializer (operator<<)"); eigen_assert(m_currentBlockRows==other.rows()); m_xpr.template block (m_row, m_col, other.rows(), other.cols()) = other; m_col += other.cols(); return *this; } EIGEN_DEVICE_FUNC inline ~CommaInitializer() #if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception) #endif { finished(); } /** \returns the built matrix once all its coefficients have been set. * Calling finished is 100% optional. Its purpose is to write expressions * like this: * \code * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); * \endcode */ EIGEN_DEVICE_FUNC inline XprType& finished() { eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() && "Too few coefficients passed to comma initializer (operator<<)"); return m_xpr; } XprType& m_xpr; // target expression Index m_row; // current row id Index m_col; // current col id Index m_currentBlockRows; // current block height }; /** \anchor MatrixBaseCommaInitRef * Convenient operator to set the coefficients of a matrix. 
* * The coefficients must be provided in a row major order and exactly match * the size of the matrix. Otherwise an assertion is raised. * * Example: \include MatrixBase_set.cpp * Output: \verbinclude MatrixBase_set.out * * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. * * \sa CommaInitializer::finished(), class CommaInitializer */ template EIGEN_DEVICE_FUNC inline CommaInitializer DenseBase::operator<< (const Scalar& s) { return CommaInitializer(*static_cast(this), s); } /** \sa operator<<(const Scalar&) */ template template EIGEN_DEVICE_FUNC inline CommaInitializer DenseBase::operator<<(const DenseBase& other) { return CommaInitializer(*static_cast(this), other); } } // end namespace Eigen #endif // EIGEN_COMMAINITIALIZER_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ConditionEstimator.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONDITIONESTIMATOR_H #define EIGEN_CONDITIONESTIMATOR_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct rcond_compute_sign { static inline Vector run(const Vector& v) { const RealVector v_abs = v.cwiseAbs(); return (v_abs.array() == static_cast(0)) .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); } }; // Partial specialization to avoid elementwise division for real vectors. 
template struct rcond_compute_sign { static inline Vector run(const Vector& v) { return (v.array() < static_cast(0)) .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); } }; /** * \returns an estimate of ||inv(matrix)||_1 given a decomposition of * \a matrix that implements .solve() and .adjoint().solve() methods. * * This function implements Algorithms 4.1 and 5.1 from * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf * which also forms the basis for the condition number estimators in * LAPACK. Since at most 10 calls to the solve method of dec are * performed, the total cost is O(dims^2), as opposed to O(dims^3) * needed to compute the inverse matrix explicitly. * * The most common usage is in estimating the condition number * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be * computed directly in O(n^2) operations. * * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and * LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) { typedef typename Decomposition::MatrixType MatrixType; typedef typename Decomposition::Scalar Scalar; typedef typename Decomposition::RealScalar RealScalar; typedef typename internal::plain_col_type::type Vector; typedef typename internal::plain_col_type::type RealVector; const bool is_complex = (NumTraits::IsComplex != 0); eigen_assert(dec.rows() == dec.cols()); const Index n = dec.rows(); if (n == 0) return 0; // Disable Index to float conversion warning #ifdef __INTEL_COMPILER #pragma warning push #pragma warning ( disable : 2259 ) #endif Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); #ifdef __INTEL_COMPILER #pragma warning pop #endif // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 // and is the objective maximized by the ("super-") gradient ascent // algorithm below. 
RealScalar lower_bound = v.template lpNorm<1>(); if (n == 1) return lower_bound; // Gradient ascent algorithm follows: We know that the optimum is achieved at // one of the simplices v = e_i, so in each iteration we follow a // super-gradient to move towards the optimal one. RealScalar old_lower_bound = lower_bound; Vector sign_vector(n); Vector old_sign_vector; Index v_max_abs_index = -1; Index old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { sign_vector = internal::rcond_compute_sign::run(v); if (k > 0 && !is_complex && sign_vector == old_sign_vector) { // Break if the solution stagnated. break; } // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| v = dec.adjoint().solve(sign_vector); v.real().cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { // Break if the solution stagnated. break; } // Move to the new simplex e_j, where j = v_max_abs_index. v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. lower_bound = v.template lpNorm<1>(); if (lower_bound <= old_lower_bound) { // Break if the gradient step did not increase the lower_bound. break; } if (!is_complex) { old_sign_vector = sign_vector; } old_v_max_abs_index = v_max_abs_index; old_lower_bound = lower_bound; } // The following calculates an independent estimate of ||matrix||_1 by // multiplying matrix by a vector with entries of slowly increasing // magnitude and alternating sign: // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. // This improvement to Hager's algorithm above is due to Higham. It was // added to make the algorithm more robust in certain corner cases where // large elements in the matrix might otherwise escape detection due to // exact cancellation (especially when op and op_adjoint correspond to a // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. 
Scalar alternating_sign(RealScalar(1)); for (Index i = 0; i < n; ++i) { // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates v[i] = alternating_sign * static_cast(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); alternating_sign = -alternating_sign; } v = dec.solve(v); const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n)); return numext::maxi(lower_bound, alternate_lower_bound); } /** \brief Reciprocal condition number estimator. * * Computing a decomposition of a dense matrix takes O(n^3) operations, while * this method estimates the condition number quickly and reliably in O(n^2) * operations. * * \returns an estimate of the reciprocal condition number * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and * its decomposition. Supports the following decompositions: FullPivLU, * PartialPivLU, LDLT, and LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); if (dec.rows() == 0) return NumTraits::infinity(); if (matrix_norm == RealScalar(0)) return RealScalar(0); if (dec.rows() == 1) return RealScalar(1); const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec); return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0) : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); } } // namespace internal } // namespace Eigen #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CoreEvaluators.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2011 Benoit Jacob // Copyright (C) 2011-2014 Gael Guennebaud // Copyright (C) 2011-2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREEVALUATORS_H #define EIGEN_COREEVALUATORS_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { // This class returns the evaluator kind from the expression storage kind. // Default assumes index based accessors template struct storage_kind_to_evaluator_kind { typedef IndexBased Kind; }; // This class returns the evaluator shape from the expression storage kind. // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc. template struct storage_kind_to_shape; template<> struct storage_kind_to_shape { typedef DenseShape Shape; }; template<> struct storage_kind_to_shape { typedef SolverShape Shape; }; template<> struct storage_kind_to_shape { typedef PermutationShape Shape; }; template<> struct storage_kind_to_shape { typedef TranspositionsShape Shape; }; // Evaluators have to be specialized with respect to various criteria such as: // - storage/structure/shape // - scalar type // - etc. // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators. // We currently distinguish the following kind of evaluators: // - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate) // - binary_evaluator for expression taking two arguments (CwiseBinaryOp) // - ternary_evaluator for expression taking three arguments (CwiseTernaryOp) // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching. 
// - mapbase_evaluator for Map, Block, Ref // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator) template< typename T, typename Arg1Kind = typename evaluator_traits::Kind, typename Arg2Kind = typename evaluator_traits::Kind, typename Arg3Kind = typename evaluator_traits::Kind, typename Arg1Scalar = typename traits::Scalar, typename Arg2Scalar = typename traits::Scalar, typename Arg3Scalar = typename traits::Scalar> struct ternary_evaluator; template< typename T, typename LhsKind = typename evaluator_traits::Kind, typename RhsKind = typename evaluator_traits::Kind, typename LhsScalar = typename traits::Scalar, typename RhsScalar = typename traits::Scalar> struct binary_evaluator; template< typename T, typename Kind = typename evaluator_traits::Kind, typename Scalar = typename T::Scalar> struct unary_evaluator; // evaluator_traits contains traits for evaluator template struct evaluator_traits_base { // by default, get evaluator kind and shape from storage typedef typename storage_kind_to_evaluator_kind::StorageKind>::Kind Kind; typedef typename storage_kind_to_shape::StorageKind>::Shape Shape; }; // Default evaluator traits template struct evaluator_traits : public evaluator_traits_base { }; template::Shape > struct evaluator_assume_aliasing { static const bool value = false; }; // By default, we assume a unary expression: template struct evaluator : public unary_evaluator { typedef unary_evaluator Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : Base(xpr) {} }; // TODO: Think about const-correctness template struct evaluator : evaluator { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const T& xpr) : evaluator(xpr) {} }; // ---------- base class for all evaluators ---------- template struct evaluator_base { // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices. 
typedef traits ExpressionTraits; enum { Alignment = 0 }; // noncopyable: // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization) // and make complex evaluator much larger than then should do. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {} private: EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&); EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&); }; // -------------------- Matrix and Array -------------------- // // evaluator is a common base class for the // Matrix and Array evaluators. // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense, // so no need for more sophisticated dispatching. // this helper permits to completely eliminate m_outerStride if it is known at compiletime. template class plainobjectbase_evaluator_data { public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr) { #ifndef EIGEN_INTERNAL_DEBUGGING EIGEN_UNUSED_VARIABLE(outerStride); #endif eigen_internal_assert(outerStride==OuterStride); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; } const Scalar *data; }; template class plainobjectbase_evaluator_data { public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outerStride() const { return m_outerStride; } const Scalar *data; protected: Index m_outerStride; }; template struct evaluator > : evaluator_base { typedef PlainObjectBase PlainObjectType; typedef typename PlainObjectType::Scalar Scalar; typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = PlainObjectType::IsRowMajor, IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, 
RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, ColsAtCompileTime = PlainObjectType::ColsAtCompileTime, CoeffReadCost = NumTraits::ReadCost, Flags = traits::EvaluatorFlags, Alignment = traits::Alignment }; enum { // We do not need to know the outer stride for vectors OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0 : int(IsRowMajor) ? ColsAtCompileTime : RowsAtCompileTime }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator() : m_d(0,OuterStrideAtCompileTime) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const PlainObjectType& m) : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (IsRowMajor) return m_d.data[row * m_d.outerStride() + col]; else return m_d.data[row + col * m_d.outerStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_d.data[index]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { if (IsRowMajor) return const_cast(m_d.data)[row * m_d.outerStride() + col]; else return const_cast(m_d.data)[row + col * m_d.outerStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return const_cast(m_d.data)[index]; } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { if (IsRowMajor) return ploadt(m_d.data + row * m_d.outerStride() + col); else return ploadt(m_d.data + row + col * m_d.outerStride()); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return ploadt(m_d.data + index); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { if (IsRowMajor) return pstoret (const_cast(m_d.data) + row * m_d.outerStride() + col, x); else return pstoret (const_cast(m_d.data) + row + col * m_d.outerStride(), x); } template EIGEN_STRONG_INLINE void writePacket(Index 
index, const PacketType& x) { return pstoret(const_cast(m_d.data) + index, x); } protected: plainobjectbase_evaluator_data m_d; }; template struct evaluator > : evaluator > > { typedef Matrix XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator() {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& m) : evaluator >(m) { } }; template struct evaluator > : evaluator > > { typedef Array XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator() {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& m) : evaluator >(m) { } }; // -------------------- Transpose -------------------- template struct unary_evaluator, IndexBased> : evaluator_base > { typedef Transpose XprType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = evaluator::Flags ^ RowMajorBit, Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename XprType::Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(col, row); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet(index); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket(col, row, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) 
{ m_argImpl.template writePacket(index, x); } protected: evaluator m_argImpl; }; // -------------------- CwiseNullaryOp -------------------- // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator. // Likewise, there is not need to more sophisticated dispatching here. template::value, bool has_unary = has_unary_operator::value, bool has_binary = has_binary_operator::value> struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp(); } }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp(i,j); } }; // We need the following specialization for vector-only functors assigned to a runtime vector, // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd. // In this case, i==0 and j is used for the actual iteration. 
template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op(i+j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op.template packetOp(i+j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } }; template struct nullary_wrapper {}; #if 0 && EIGEN_COMP_MSVC>0 // Disable this ugly workaround. This is now handled in traits::match, // but this piece of code might still become handly if some other weird compilation // erros pop up again. // MSVC exhibits a weird compilation error when // compiling: // Eigen::MatrixXf A = MatrixXf::Random(3,3); // Ref R = 2.f*A; // and that has_*ary_operator> have not been instantiated yet. // The "problem" is that evaluator<2.f*A> is instantiated by traits::match<2.f*A> // and at that time has_*ary_operator returns true regardless of T. // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>. // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(), // and packet() are really instantiated as implemented below: // This is a simple wrapper around Index to enforce the re-instantiation of // has_*ary_operator when needed. 
template struct nullary_wrapper_workaround_msvc { nullary_wrapper_workaround_msvc(const T&); operator T()const; }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().operator()(op,i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().operator()(op,i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().template packetOp(op,i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().template packetOp(op,i); } }; #endif // MSVC workaround template struct evaluator > : evaluator_base > { typedef CwiseNullaryOp XprType; typedef typename internal::remove_all::type PlainObjectTypeCleaned; enum { CoeffReadCost = internal::functor_traits::Cost, Flags = (evaluator::Flags & ( HereditaryBits | (functor_has_linear_access::ret ? LinearAccessBit : 0) | (functor_traits::PacketAccess ? PacketAccessBit : 0))) | (functor_traits::IsRepeatable ? 
0 : EvalBeforeNestingBit), Alignment = AlignedMax }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) : m_functor(n.functor()), m_wrapper() { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType row, IndexType col) const { return m_wrapper(m_functor, row, col); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const { return m_wrapper(m_functor,index); } template EIGEN_STRONG_INLINE PacketType packet(IndexType row, IndexType col) const { return m_wrapper.template packetOp(m_functor, row, col); } template EIGEN_STRONG_INLINE PacketType packet(IndexType index) const { return m_wrapper.template packetOp(m_functor, index); } protected: const NullaryOp m_functor; const internal::nullary_wrapper m_wrapper; }; // -------------------- CwiseUnaryOp -------------------- template struct unary_evaluator, IndexBased > : evaluator_base > { typedef CwiseUnaryOp XprType; enum { CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), Flags = evaluator::Flags & (HereditaryBits | LinearAccessBit | (functor_traits::PacketAccess ? 
PacketAccessBit : 0)), Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& op) : m_d(op) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_d.func()(m_d.argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_d.func()(m_d.argImpl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_d.func().packetOp(m_d.argImpl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_d.func().packetOp(m_d.argImpl.template packet(index)); } protected: // this helper permits to completely eliminate the functor if it is empty struct Data { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; } UnaryOp op; evaluator argImpl; }; Data m_d; }; // -------------------- CwiseTernaryOp -------------------- // this is a ternary expression template struct evaluator > : public ternary_evaluator > { typedef CwiseTernaryOp XprType; typedef ternary_evaluator > Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template struct ternary_evaluator, IndexBased, IndexBased> : evaluator_base > { typedef CwiseTernaryOp XprType; enum { CoeffReadCost = int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), Arg1Flags = evaluator::Flags, Arg2Flags = evaluator::Flags, Arg3Flags = evaluator::Flags, SameType = is_same::value && is_same::value, StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && 
(int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit), Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & ( HereditaryBits | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN( EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment), evaluator::Alignment) }; EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_d.func().packetOp(m_d.arg1Impl.template packet(row, col), m_d.arg2Impl.template packet(row, col), m_d.arg3Impl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_d.func().packetOp(m_d.arg1Impl.template packet(index), m_d.arg2Impl.template packet(index), m_d.arg3Impl.template packet(index)); } protected: // this helper permits to completely eliminate the functor if it is empty struct Data { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TernaryOp& func() const { return op; } TernaryOp op; evaluator arg1Impl; evaluator arg2Impl; evaluator arg3Impl; }; Data 
m_d; }; // -------------------- CwiseBinaryOp -------------------- // this is a binary expression template struct evaluator > : public binary_evaluator > { typedef CwiseBinaryOp XprType; typedef binary_evaluator > Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template struct binary_evaluator, IndexBased, IndexBased> : evaluator_base > { typedef CwiseBinaryOp XprType; enum { CoeffReadCost = int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost) + int(functor_traits::Cost), LhsFlags = evaluator::Flags, RhsFlags = evaluator::Flags, SameType = is_same::value, StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit), Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( HereditaryBits | (int(LhsFlags) & int(RhsFlags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment,evaluator::Alignment) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit binary_evaluator(const XprType& xpr) : m_d(xpr) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_d.func().packetOp(m_d.lhsImpl.template packet(row, col), m_d.rhsImpl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_d.func().packetOp(m_d.lhsImpl.template packet(index), m_d.rhsImpl.template 
packet(index)); } protected: // this helper permits to completely eliminate the functor if it is empty struct Data { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& func() const { return op; } BinaryOp op; evaluator lhsImpl; evaluator rhsImpl; }; Data m_d; }; // -------------------- CwiseUnaryView -------------------- template struct unary_evaluator, IndexBased> : evaluator_base > { typedef CwiseUnaryView XprType; enum { CoeffReadCost = int(evaluator::CoeffReadCost) + int(functor_traits::Cost), Flags = (evaluator::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)), Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost... }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_d.func()(m_d.argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_d.func()(m_d.argImpl.coeff(index)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_d.func()(m_d.argImpl.coeffRef(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_d.func()(m_d.argImpl.coeffRef(index)); } protected: // this helper permits to completely eliminate the functor if it is empty struct Data { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& func() const { return op; } UnaryOp op; evaluator argImpl; }; Data m_d; }; // -------------------- Map -------------------- // 
FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ? // but that might complicate template specialization template struct mapbase_evaluator; template struct mapbase_evaluator : evaluator_base { typedef Derived XprType; typedef typename XprType::PointerType PointerType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::RowsAtCompileTime, ColsAtCompileTime = XprType::ColsAtCompileTime, CoeffReadCost = NumTraits::ReadCost }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit mapbase_evaluator(const XprType& map) : m_data(const_cast(map.data())), m_innerStride(map.innerStride()), m_outerStride(map.outerStride()) { EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator::Flags&PacketAccessBit, internal::inner_stride_at_compile_time::ret==1), PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_data[index * m_innerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_data[index * m_innerStride.value()]; } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::ploadt(ptr); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return internal::ploadt(m_data + index * m_innerStride.value()); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::pstoret(ptr, x); } template 
EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { internal::pstoret(m_data + index * m_innerStride.value(), x); } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowStride() const EIGEN_NOEXCEPT { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colStride() const EIGEN_NOEXCEPT { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); } PointerType m_data; const internal::variable_if_dynamic m_innerStride; const internal::variable_if_dynamic m_outerStride; }; template struct evaluator > : public mapbase_evaluator, PlainObjectType> { typedef Map XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits::type PacketScalar; enum { InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? int(PlainObjectType::OuterStrideAtCompileTime) : int(StrideType::OuterStrideAtCompileTime), HasNoInnerStride = InnerStrideAtCompileTime == 1, HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, HasNoStride = HasNoInnerStride && HasNoOuterStride, IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit), LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? 
~int(0) : ~int(LinearAccessBit), Flags = int( evaluator::Flags) & (LinearAccessMask&PacketAccessMask), Alignment = int(MapOptions)&int(AlignedMask) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) : mapbase_evaluator(map) { } }; // -------------------- Ref -------------------- template struct evaluator > : public mapbase_evaluator, PlainObjectType> { typedef Ref XprType; enum { Flags = evaluator >::Flags, Alignment = evaluator >::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& ref) : mapbase_evaluator(ref) { } }; // -------------------- Block -------------------- template::ret> struct block_evaluator; template struct evaluator > : block_evaluator { typedef Block XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits::type PacketScalar; enum { CoeffReadCost = evaluator::CoeffReadCost, RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime, ArgTypeIsRowMajor = (int(evaluator::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 : ArgTypeIsRowMajor, HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(inner_stride_at_compile_time::ret) : int(outer_stride_at_compile_time::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(outer_stride_at_compile_time::ret) : int(inner_stride_at_compile_time::ret), MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? 
PacketAccessBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator::Flags&LinearAccessBit))) ? LinearAccessBit : 0, FlagsRowMajorBit = XprType::Flags&RowMajorBit, Flags0 = evaluator::Flags & ( (HereditaryBits & ~RowMajorBit) | DirectAccessBit | MaskPacketAccessBit), Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit, PacketAlignment = unpacket_traits::alignment, Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (OuterStrideAtCompileTime!=0) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, Alignment0) }; typedef block_evaluator block_evaluator_type; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& block) : block_evaluator_type(block) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } }; // no direct-access => dispatch to a unary evaluator template struct block_evaluator : unary_evaluator > { typedef Block XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& block) : unary_evaluator(block) {} }; template struct unary_evaluator, IndexBased> : evaluator_base > { typedef Block XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& block) : m_argImpl(block.nestedExpression()), m_startRow(block.startRow()), m_startCol(block.startCol()), m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? 
block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = XprType::RowsAtCompileTime, ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator::Flags&LinearAccessBit) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return linear_coeff_impl(index, bool_constant()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return linear_coeffRef_impl(index, bool_constant()); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(m_startRow.value() + row, m_startCol.value() + col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { if (ForwardLinearAccess) return m_argImpl.template packet(m_linear_offset.value() + index); else return packet(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { return m_argImpl.template writePacket(m_startRow.value() + row, m_startCol.value() + col, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { if (ForwardLinearAccess) return m_argImpl.template writePacket(m_linear_offset.value() + index, x); else return writePacket(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? 
index : 0, x); } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const { return m_argImpl.coeff(m_linear_offset.value() + index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const { return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */) { return m_argImpl.coeffRef(m_linear_offset.value() + index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */) { return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } evaluator m_argImpl; const variable_if_dynamic m_startRow; const variable_if_dynamic m_startCol; const variable_if_dynamic m_linear_offset; }; // TODO: This evaluator does not actually use the child evaluator; // all action is via the data() as returned by the Block expression. template struct block_evaluator : mapbase_evaluator, typename Block::PlainObject> { typedef Block XprType; typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit block_evaluator(const XprType& block) : mapbase_evaluator(block) { // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator::Alignment)) == 0) && "data is not aligned"); } }; // -------------------- Select -------------------- // NOTE shall we introduce a ternary_evaluator? 
// TODO enable vectorization for Select template struct evaluator > : evaluator_base > { typedef Select XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + EIGEN_PLAIN_ENUM_MAX(evaluator::CoeffReadCost, evaluator::CoeffReadCost), Flags = (unsigned int)evaluator::Flags & evaluator::Flags & HereditaryBits, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& select) : m_conditionImpl(select.conditionMatrix()), m_thenImpl(select.thenMatrix()), m_elseImpl(select.elseMatrix()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (m_conditionImpl.coeff(row, col)) return m_thenImpl.coeff(row, col); else return m_elseImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (m_conditionImpl.coeff(index)) return m_thenImpl.coeff(index); else return m_elseImpl.coeff(index); } protected: evaluator m_conditionImpl; evaluator m_thenImpl; evaluator m_elseImpl; }; // -------------------- Replicate -------------------- template struct unary_evaluator > : evaluator_base > { typedef Replicate XprType; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor }; typedef typename internal::nested_eval::type ArgTypeNested; typedef typename internal::remove_all::type ArgTypeNestedCleaned; enum { CoeffReadCost = evaluator::CoeffReadCost, LinearAccessMask = XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0, Flags = (evaluator::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits::Flags & RowMajorBit), Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& replicate) : m_arg(replicate.nestedExpression()), m_argImpl(m_arg), m_rows(replicate.nestedExpression().rows()), m_cols(replicate.nestedExpression().cols()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.coeff(actual_row, actual_col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_index = internal::traits::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? index : index%m_rows.value()); return m_argImpl.coeff(actual_index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.template packet(actual_row, actual_col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { const Index actual_index = internal::traits::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? 
index : index%m_rows.value()); return m_argImpl.template packet(actual_index); } protected: const ArgTypeNested m_arg; evaluator m_argImpl; const variable_if_dynamic m_rows; const variable_if_dynamic m_cols; }; // -------------------- MatrixWrapper and ArrayWrapper -------------------- // // evaluator_wrapper_base is a common base class for the // MatrixWrapper and ArrayWrapper evaluators. template struct evaluator_wrapper_base : evaluator_base { typedef typename remove_all::type ArgType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = evaluator::Flags, Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(row, col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet(index); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket(row, col, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket(index, x); } protected: evaluator m_argImpl; }; template struct unary_evaluator > : evaluator_wrapper_base > { typedef MatrixWrapper XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper) : 
evaluator_wrapper_base >(wrapper.nestedExpression()) { } }; template struct unary_evaluator > : evaluator_wrapper_base > { typedef ArrayWrapper XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base >(wrapper.nestedExpression()) { } }; // -------------------- Reverse -------------------- // defined in Reverse.h: template struct reverse_packet_cond; template struct unary_evaluator > : evaluator_base > { typedef Reverse XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::IsRowMajor, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor), CoeffReadCost = evaluator::CoeffReadCost, // let's enable LinearAccess only with vectorization because of the product overhead // FIXME enable DirectAccess with negative strides? Flags0 = evaluator::Flags, LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) ) || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1)) ? LinearAccessBit : 0, Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& reverse) : m_argImpl(reverse.nestedExpression()), m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1), m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? 
m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { enum { PacketSize = unpacket_traits::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond reverse_packet; return reverse_packet::run(m_argImpl.template packet( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { enum { PacketSize = unpacket_traits::size }; return preverse(m_argImpl.template packet(m_rows.value() * m_cols.value() - index - PacketSize)); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { // FIXME we could factorize some code with packet(i,j) enum { PacketSize = unpacket_traits::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond reverse_packet; m_argImpl.template writePacket( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? 
m_cols.value() - col - OffsetCol : col, reverse_packet::run(x)); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { enum { PacketSize = unpacket_traits::size }; m_argImpl.template writePacket (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); } protected: evaluator m_argImpl; // If we do not reverse rows, then we do not need to know the number of rows; same for columns // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. const variable_if_dynamic m_rows; const variable_if_dynamic m_cols; }; // -------------------- Diagonal -------------------- template struct evaluator > : evaluator_base > { typedef Diagonal XprType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = (unsigned int)(evaluator::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, Alignment = 0 }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& diagonal) : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const { return m_argImpl.coeff(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index + rowOffset(), index + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) { return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); } protected: evaluator m_argImpl; const internal::variable_if_dynamicindex m_index; private: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowOffset() const { return m_index.value() > 0 ? 
0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; } }; //---------------------------------------------------------------------- // deprecated code //---------------------------------------------------------------------- // -------------------- EvalToTemp -------------------- // expression class for evaluating nested expression to a temporary template class EvalToTemp; template struct traits > : public traits { }; template class EvalToTemp : public dense_xpr_base >::type { public: typedef typename dense_xpr_base::type Base; EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) explicit EvalToTemp(const ArgType& arg) : m_arg(arg) { } const ArgType& arg() const { return m_arg; } EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_arg.rows(); } EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_arg.cols(); } private: const ArgType& m_arg; }; template struct evaluator > : public evaluator { typedef EvalToTemp XprType; typedef typename ArgType::PlainObject PlainObject; typedef evaluator Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) { ::new (static_cast(this)) Base(m_result); } // This constructor is used when nesting an EvalTo evaluator in another evaluator EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) { ::new (static_cast(this)) Base(m_result); } protected: PlainObject m_result; }; } // namespace internal } // end namespace Eigen #endif // EIGEN_COREEVALUATORS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CoreIterators.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREITERATORS_H #define EIGEN_COREITERATORS_H #include "./InternalHeaderCheck.h" namespace Eigen { /* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core */ namespace internal { template class inner_iterator_selector; } /** \class InnerIterator * \brief An InnerIterator allows to loop over the element of any matrix expression. * * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed. * * TODO: add a usage example */ template class InnerIterator { protected: typedef internal::inner_iterator_selector::Kind> IteratorType; typedef internal::evaluator EvaluatorType; typedef typename internal::traits::Scalar Scalar; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {} /// \returns the value of the current coefficient. EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); } /** Increment the iterator \c *this to the next non-zero coefficient. * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } EIGEN_STRONG_INLINE InnerIterator& operator+=(Index i) { m_iter.operator+=(i); return *this; } EIGEN_STRONG_INLINE InnerIterator operator+(Index i) { InnerIterator result(*this); result+=i; return result; } /// \returns the column or row index of the current coefficient. EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. 
EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. EIGEN_STRONG_INLINE operator bool() const { return m_iter; } protected: EvaluatorType m_eval; IteratorType m_iter; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; // SparseMatrix::InnerIterator it(A,0); template InnerIterator(const EigenBase&,Index outer); }; namespace internal { // Generic inner iterator implementation for dense objects template class inner_iterator_selector { protected: typedef evaluator EvaluatorType; typedef typename traits::Scalar Scalar; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize) : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {} EIGEN_STRONG_INLINE Scalar value() const { return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner) : m_eval.coeff(m_inner, m_outer); } EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } EIGEN_STRONG_INLINE Index index() const { return m_inner; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const EvaluatorType& m_eval; Index m_inner; const Index m_outer; const Index m_end; }; // For iterator-based evaluator, inner-iterator is already implemented as // evaluator<>::InnerIterator template class inner_iterator_selector : public evaluator::InnerIterator { protected: typedef typename evaluator::InnerIterator Base; typedef evaluator EvaluatorType; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) : Base(eval, outerId) {} }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_COREITERATORS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CwiseBinaryOp.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_CWISE_BINARY_OP_H #define EIGEN_CWISE_BINARY_OP_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > { // we must not inherit from traits since it has // the potential to cause problems with MSVC typedef typename remove_all::type Ancestor; typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime }; // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), // we still want to handle the case when the result type is different. typedef typename result_of< BinaryOp( const typename Lhs::Scalar&, const typename Rhs::Scalar& ) >::type Scalar; typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, BinaryOp>::ret StorageKind; typedef typename promote_index_type::StorageIndex, typename traits::StorageIndex>::type StorageIndex; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_reference::type _LhsNested; typedef typename remove_reference::type _RhsNested; enum { Flags = cwise_promote_storage_order::StorageKind,typename traits::StorageKind,_LhsNested::Flags & RowMajorBit,_RhsNested::Flags & RowMajorBit>::value }; }; } // end namespace internal template class CwiseBinaryOpImpl; /** \class CwiseBinaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions * * \tparam BinaryOp template functor implementing the operator * \tparam LhsType the type of the left-hand side * \tparam RhsType the type of the right-hand side * * This class represents an expression where a coefficient-wise binary operator is applied to two expressions. 
* It is the return type of binary operators, by which we mean only those binary operators where * both the left-hand side and the right-hand side are Eigen expressions. * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp. * * Most of the time, this is the only way that it is used, so you typically don't have to name * CwiseBinaryOp types explicitly. * * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp */ template class CwiseBinaryOp : public CwiseBinaryOpImpl< BinaryOp, LhsType, RhsType, typename internal::cwise_promote_storage_type::StorageKind, typename internal::traits::StorageKind, BinaryOp>::ret>, internal::no_assignment_operator { public: typedef typename internal::remove_all::type Functor; typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename CwiseBinaryOpImpl< BinaryOp, LhsType, RhsType, typename internal::cwise_promote_storage_type::StorageKind, typename internal::traits::StorageKind, BinaryOp>::ret>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) typedef typename internal::ref_selector::type LhsNested; typedef typename internal::ref_selector::type RhsNested; typedef typename internal::remove_reference::type _LhsNested; typedef typename internal::remove_reference::type _RhsNested; #if EIGEN_COMP_MSVC && EIGEN_HAS_CXX11 //Required for Visual Studio or the Copy constructor will probably not get inlined! 
EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp&) = default; #endif EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp()) : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) { eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { // return the fixed size type if available to enable compile time optimizations return internal::traits::type>::RowsAtCompileTime==Dynamic ? m_rhs.rows() : m_lhs.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { // return the fixed size type if available to enable compile time optimizations return internal::traits::type>::ColsAtCompileTime==Dynamic ? m_rhs.cols() : m_lhs.cols(); } /** \returns the left hand side nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } /** \returns the right hand side nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } /** \returns the functor representing the binary operation */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; } protected: LhsNested m_lhs; RhsNested m_rhs; const BinaryOp m_functor; }; // Generic API dispatcher template class CwiseBinaryOpImpl : public internal::generic_xpr_base >::type { public: typedef typename internal::generic_xpr_base >::type Base; }; /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & MatrixBase::operator-=(const MatrixBase &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op()); return derived(); } /** replaces \c *this by \c *this + \a other. 
* * \returns a reference to \c *this */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & MatrixBase::operator+=(const MatrixBase& other) { call_assignment(derived(), other.derived(), internal::add_assign_op()); return derived(); } } // end namespace Eigen #endif // EIGEN_CWISE_BINARY_OP_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CwiseNullaryOp.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_NULLARY_OP_H #define EIGEN_CWISE_NULLARY_OP_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { enum { Flags = traits::Flags & RowMajorBit }; }; } // namespace internal /** \class CwiseNullaryOp * \ingroup Core_Module * * \brief Generic expression of a matrix where all coefficients are defined by a functor * * \tparam NullaryOp template functor implementing the operator * \tparam PlainObjectType the underlying plain matrix/array type * * This class represents an expression of a generic nullary operator. * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, * and most of the time this is the only way it is used. * * However, if you want to write a function returning such an expression, you * will need to use this class. * * The functor NullaryOp must expose one of the following method:
\c operator()() if the procedural generation does not depend on the coefficient entries (e.g., random numbers)
\c operator()(Index i)if the procedural generation makes sense for vectors only and that it depends on the coefficient index \c i (e.g., linspace)
\c operator()(Index i,Index j)if the procedural generation depends on the matrix coordinates \c i, \c j (e.g., to generate a checkerboard with 0 and 1)
* It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized for vectors. * * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding * C++11 random number generators. * * A nullary expression can also be used to implement custom sophisticated matrix manipulations * that cannot be covered by the existing set of natively supported matrix manipulations. * See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations * on the behavior of CwiseNullaryOp. * * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr */ template class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp >::type, internal::no_assignment_operator { public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) : m_rows(rows), m_cols(cols), m_functor(func) { eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const { return m_cols.value(); } /** \returns the functor representing the nullary operation */ EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; } protected: const internal::variable_if_dynamic m_rows; const internal::variable_if_dynamic m_cols; const NullaryOp m_functor; }; /** \returns an expression of a matrix defined by a custom functor \a func * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. 
For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE #ifndef EIGEN_PARSED_BY_DOXYGEN const CwiseNullaryOp::PlainObject> #else const CwiseNullaryOp #endif DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) { return CwiseNullaryOp(rows, cols, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * Here is an example with C++11 random generators: \include random_cpp11.cpp * Output: \verbinclude random_cpp11.out * * \sa class CwiseNullaryOp */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE #ifndef EIGEN_PARSED_BY_DOXYGEN const CwiseNullaryOp::PlainObject> #else const CwiseNullaryOp #endif DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); else return CwiseNullaryOp(size, 1, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * The template parameter \a CustomNullaryOp is the type of the functor. 
* * \sa class CwiseNullaryOp */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE #ifndef EIGEN_PARSED_BY_DOXYGEN const CwiseNullaryOp::PlainObject> #else const CwiseNullaryOp #endif DenseBase::NullaryExpr(const CustomNullaryOp& func) { return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); } /** \returns an expression of a constant matrix of value \a value * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this DenseBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index rows, Index cols, const Scalar& value) { return DenseBase::NullaryExpr(rows, cols, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value * * The parameter \a size is the size of the returned vector. * Must be compatible with this DenseBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index size, const Scalar& value) { return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. 
* * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(const Scalar& value) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op(value)); } /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&) * * \only_for_vectors * * Example: \include DenseBase_LinSpaced_seq_deprecated.cpp * Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out * * \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&) */ template EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&) * * \sa LinSpaced(const Scalar&, const Scalar&) */ template EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** * \brief Sets a linearly spaced vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. 
* * \only_for_vectors * * Example: \include DenseBase_LinSpaced.cpp * Output: \verbinclude DenseBase_LinSpaced.out * * For integer scalar types, an even spacing is possible if and only if the length of the range, * i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the * number of values \c high-low+1 (meaning each value can be repeated the same number of time). * If one of these two considions is not satisfied, then \c high is lowered to the largest value * satisfying one of this constraint. * Here are some examples: * * Example: \include DenseBase_LinSpacedInt.cpp * Output: \verbinclude DenseBase_LinSpacedInt.out * * \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&) * Special version for fixed size types which does not require the size parameter. 
*/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType DenseBase::LinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */ template EIGEN_DEVICE_FUNC bool DenseBase::isApproxToConstant (const Scalar& val, const RealScalar& prec) const { typename internal::nested_eval::type self(derived()); for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isApprox(self.coeff(i, j), val, prec)) return false; return true; } /** This is just an alias for isApproxToConstant(). * * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ template EIGEN_DEVICE_FUNC bool DenseBase::isConstant (const Scalar& val, const RealScalar& prec) const { return isApproxToConstant(val, prec); } /** Alias for setConstant(): sets all coefficients in this expression to \a val. * * \sa setConstant(), Constant(), class CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& val) { setConstant(val); } /** Sets all coefficients in this expression to value \a val. * * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& val) { return derived() = Constant(rows(), cols(), val); } /** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val. 
* * \only_for_vectors * * Example: \include Matrix_setConstant_int.cpp * Output: \verbinclude Matrix_setConstant_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index size, const Scalar& val) { resize(size); return setConstant(val); } /** Resizes to the given size, and sets all coefficients in this expression to the given value \a val. * * \param rows the new number of rows * \param cols the new number of columns * \param val the value to which all coefficients are set * * Example: \include Matrix_setConstant_int_int.cpp * Output: \verbinclude Matrix_setConstant_int_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& val) { resize(rows, cols); return setConstant(val); } /** Resizes to the given size, changing only the number of columns, and sets all * coefficients in this expression to the given value \a val. For the parameter * of type NoChange_t, just pass the special value \c NoChange. * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(NoChange_t, Index cols, const Scalar& val) { return setConstant(rows(), cols, val); } /** Resizes to the given size, changing only the number of rows, and sets all * coefficients in this expression to the given value \a val. For the parameter * of type NoChange_t, just pass the special value \c NoChange. 
* * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setConstant(Index rows, NoChange_t, const Scalar& val) { return setConstant(rows, cols(), val); } /** * \brief Sets a linearly spaced vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * Example: \include DenseBase_setLinSpaced.cpp * Output: \verbinclude DenseBase_setLinSpaced.out * * For integer scalar types, do not miss the explanations on the definition * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. * * \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op(low,high,newSize)); } /** * \brief Sets a linearly spaced vector. * * The function fills \c *this with equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * For integer scalar types, do not miss the explanations on the definition * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. * * \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return setLinSpaced(size(), low, high); } // zero: /** \returns an expression of a zero matrix. 
* * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int_int.cpp * Output: \verbinclude MatrixBase_zero_int_int.out * * \sa Zero(), Zero(Index) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero(Index rows, Index cols) { return Constant(rows, cols, Scalar(0)); } /** \returns an expression of a zero vector. * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int.cpp * Output: \verbinclude MatrixBase_zero_int.out * * \sa Zero(), Zero(Index,Index) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero(Index size) { return Constant(size, Scalar(0)); } /** \returns an expression of a fixed-size zero matrix or vector. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_zero.cpp * Output: \verbinclude MatrixBase_zero.out * * \sa Zero(Index), Zero(Index,Index) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Zero() { return Constant(Scalar(0)); } /** \returns true if *this is approximately equal to the zero matrix, * within the precision given by \a prec. 
* * Example: \include MatrixBase_isZero.cpp * Output: \verbinclude MatrixBase_isZero.out * * \sa class CwiseNullaryOp, Zero() */ template EIGEN_DEVICE_FUNC bool DenseBase::isZero(const RealScalar& prec) const { typename internal::nested_eval::type self(derived()); for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) return false; return true; } /** Sets all coefficients in this expression to zero. * * Example: \include MatrixBase_setZero.cpp * Output: \verbinclude MatrixBase_setZero.out * * \sa class CwiseNullaryOp, Zero() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setZero() { return setConstant(Scalar(0)); } /** Resizes to the given \a size, and sets all coefficients in this expression to zero. * * \only_for_vectors * * Example: \include Matrix_setZero_int.cpp * Output: \verbinclude Matrix_setZero_int.out * * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index newSize) { resize(newSize); return setConstant(Scalar(0)); } /** Resizes to the given size, and sets all coefficients in this expression to zero. * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setZero_int_int.cpp * Output: \verbinclude Matrix_setZero_int_int.out * * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(0)); } /** Resizes to the given size, changing only the number of columns, and sets all * coefficients in this expression to zero. For the parameter of type NoChange_t, * just pass the special value \c NoChange. 
* * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(NoChange_t, Index cols) { return setZero(rows(), cols); } /** Resizes to the given size, changing only the number of rows, and sets all * coefficients in this expression to zero. For the parameter of type NoChange_t, * just pass the special value \c NoChange. * * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setZero(Index rows, NoChange_t) { return setZero(rows, cols()); } // ones: /** \returns an expression of a matrix where all coefficients equal one. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used * instead. * * Example: \include MatrixBase_ones_int_int.cpp * Output: \verbinclude MatrixBase_ones_int_int.out * * \sa Ones(), Ones(Index), isOnes(), class Ones */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones(Index rows, Index cols) { return Constant(rows, cols, Scalar(1)); } /** \returns an expression of a vector where all coefficients equal one. * * The parameter \a newSize is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Ones() should be used * instead. 
* * Example: \include MatrixBase_ones_int.cpp * Output: \verbinclude MatrixBase_ones_int.out * * \sa Ones(), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones(Index newSize) { return Constant(newSize, Scalar(1)); } /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_ones.cpp * Output: \verbinclude MatrixBase_ones.out * * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Ones() { return Constant(Scalar(1)); } /** \returns true if *this is approximately equal to the matrix where all coefficients * are equal to 1, within the precision given by \a prec. * * Example: \include MatrixBase_isOnes.cpp * Output: \verbinclude MatrixBase_isOnes.out * * \sa class CwiseNullaryOp, Ones() */ template EIGEN_DEVICE_FUNC bool DenseBase::isOnes (const RealScalar& prec) const { return isApproxToConstant(Scalar(1), prec); } /** Sets all coefficients in this expression to one. * * Example: \include MatrixBase_setOnes.cpp * Output: \verbinclude MatrixBase_setOnes.out * * \sa class CwiseNullaryOp, Ones() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() { return setConstant(Scalar(1)); } /** Resizes to the given \a newSize, and sets all coefficients in this expression to one. 
* * \only_for_vectors * * Example: \include Matrix_setOnes_int.cpp * Output: \verbinclude Matrix_setOnes_int.out * * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index newSize) { resize(newSize); return setConstant(Scalar(1)); } /** Resizes to the given size, and sets all coefficients in this expression to one. * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setOnes_int_int.cpp * Output: \verbinclude Matrix_setOnes_int_int.out * * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(1)); } /** Resizes to the given size, changing only the number of rows, and sets all * coefficients in this expression to one. For the parameter of type NoChange_t, * just pass the special value \c NoChange. * * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(Index rows, NoChange_t) { return setOnes(rows, cols()); } /** Resizes to the given size, changing only the number of columns, and sets all * coefficients in this expression to one. For the parameter of type NoChange_t, * just pass the special value \c NoChange. * * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase::setOnes(NoChange_t, Index cols) { return setOnes(rows(), cols); } // Identity: /** \returns an expression of the identity matrix (not necessarily square). 
* * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used * instead. * * Example: \include MatrixBase_identity_int_int.cpp * Output: \verbinclude MatrixBase_identity_int_int.out * * \sa Identity(), setIdentity(), isIdentity() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity(Index rows, Index cols) { return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); } /** \returns an expression of the identity matrix (not necessarily square). * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variant taking size arguments. * * Example: \include MatrixBase_identity.cpp * Output: \verbinclude MatrixBase_identity.out * * \sa Identity(Index,Index), setIdentity(), isIdentity() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity() { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); } /** \returns true if *this is approximately equal to the identity matrix * (not necessarily square), * within the precision given by \a prec. 
* * Example: \include MatrixBase_isIdentity.cpp * Output: \verbinclude MatrixBase_isIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() */ template bool MatrixBase::isIdentity (const RealScalar& prec) const { typename internal::nested_eval::type self(derived()); for(Index j = 0; j < cols(); ++j) { for(Index i = 0; i < rows(); ++i) { if(i == j) { if(!internal::isApprox(self.coeff(i, j), static_cast(1), prec)) return false; } else { if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast(1), prec)) return false; } } } return true; } namespace internal { template=16)> struct setIdentity_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { return m = Derived::Identity(m.rows(), m.cols()); } }; template struct setIdentity_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { m.setZero(); const Index size = numext::mini(m.rows(), m.cols()); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } }; } // end namespace internal /** Writes the identity expression (not necessarily square) into *this. * * Example: \include MatrixBase_setIdentity.cpp * Output: \verbinclude MatrixBase_setIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() { return internal::setIdentity_impl::run(derived()); } /** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. 
* * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setIdentity_int_int.cpp * Output: \verbinclude Matrix_setIdentity_int_int.out * * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) { derived().resize(rows, cols); return setIdentity(); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index newSize, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * This variant is for fixed-size vector only. 
* * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(),i); } /** \returns an expression of the X axis unit vector (1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() { return Derived::Unit(0); } /** \returns an expression of the Y axis unit vector (0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() { return Derived::Unit(1); } /** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() { return Derived::Unit(2); } /** \returns an expression of the W axis unit vector (0,0,0,1) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() { return Derived::Unit(3); } /** \brief Set the coefficients of \c *this to the i-th unit (basis) vector * * \param i index of the unique coefficient to be set to 1 * * \only_for_vectors * * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, 
MatrixBase::Unit(Index,Index) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setUnit(Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); eigen_assert(i EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase::setUnit(Index newSize, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); eigen_assert(i // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2016 Eugene Brevdo // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_TERNARY_OP_H #define EIGEN_CWISE_TERNARY_OP_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > { // we must not inherit from traits since it has // the potential to cause problems with MSVC typedef typename remove_all::type Ancestor; typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime }; // even though we require Arg1, Arg2, and Arg3 to have the same scalar type // (see CwiseTernaryOp constructor), // we still want to handle the case when the result type is different. 
typedef typename result_of::type Scalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::StorageIndex StorageIndex; typedef typename Arg1::Nested Arg1Nested; typedef typename Arg2::Nested Arg2Nested; typedef typename Arg3::Nested Arg3Nested; typedef typename remove_reference::type _Arg1Nested; typedef typename remove_reference::type _Arg2Nested; typedef typename remove_reference::type _Arg3Nested; enum { Flags = _Arg1Nested::Flags & RowMajorBit }; }; } // end namespace internal template class CwiseTernaryOpImpl; /** \class CwiseTernaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise ternary operator is * applied to two expressions * * \tparam TernaryOp template functor implementing the operator * \tparam Arg1Type the type of the first argument * \tparam Arg2Type the type of the second argument * \tparam Arg3Type the type of the third argument * * This class represents an expression where a coefficient-wise ternary * operator is applied to three expressions. * It is the return type of ternary operators, by which we mean only those * ternary operators where * all three arguments are Eigen expressions. * For example, the return type of betainc(matrix1, matrix2, matrix3) is a * CwiseTernaryOp. * * Most of the time, this is the only way that it is used, so you typically * don't have to name * CwiseTernaryOp types explicitly. 
* * \sa MatrixBase::ternaryExpr(const MatrixBase &, const * MatrixBase &, const CustomTernaryOp &) const, class CwiseBinaryOp, * class CwiseUnaryOp, class CwiseNullaryOp */ template class CwiseTernaryOp : public CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits::StorageKind>, internal::no_assignment_operator { public: typedef typename internal::remove_all::type Arg1; typedef typename internal::remove_all::type Arg2; typedef typename internal::remove_all::type Arg3; // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3) // The index types should match EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits::StorageKind, typename internal::traits::StorageKind>::value), STORAGE_KIND_MUST_MATCH) EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits::StorageKind, typename internal::traits::StorageKind>::value), STORAGE_KIND_MUST_MATCH) typedef typename CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp) typedef typename internal::ref_selector::type Arg1Nested; typedef typename internal::ref_selector::type Arg2Nested; typedef typename internal::ref_selector::type Arg3Nested; typedef typename internal::remove_reference::type _Arg1Nested; typedef typename internal::remove_reference::type _Arg2Nested; typedef typename internal::remove_reference::type _Arg3Nested; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3, const TernaryOp& func = TernaryOp()) : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) { eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits::type>:: 
RowsAtCompileTime == Dynamic && internal::traits::type>:: RowsAtCompileTime == Dynamic) return m_arg3.rows(); else if (internal::traits::type>:: RowsAtCompileTime == Dynamic && internal::traits::type>:: RowsAtCompileTime == Dynamic) return m_arg2.rows(); else return m_arg1.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits::type>:: ColsAtCompileTime == Dynamic && internal::traits::type>:: ColsAtCompileTime == Dynamic) return m_arg3.cols(); else if (internal::traits::type>:: ColsAtCompileTime == Dynamic && internal::traits::type>:: ColsAtCompileTime == Dynamic) return m_arg2.cols(); else return m_arg1.cols(); } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg1Nested& arg1() const { return m_arg1; } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg2Nested& arg2() const { return m_arg2; } /** \returns the third argument nested expression */ EIGEN_DEVICE_FUNC const _Arg3Nested& arg3() const { return m_arg3; } /** \returns the functor representing the ternary operation */ EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; } protected: Arg1Nested m_arg1; Arg2Nested m_arg2; Arg3Nested m_arg3; const TernaryOp m_functor; }; // Generic API dispatcher template class CwiseTernaryOpImpl : public internal::generic_xpr_base< CwiseTernaryOp >::type { public: typedef typename internal::generic_xpr_base< CwiseTernaryOp >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_TERNARY_OP_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CwiseUnaryOp.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2008-2014 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_OP_H #define EIGEN_CWISE_UNARY_OP_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef typename result_of< UnaryOp(const typename XprType::Scalar&) >::type Scalar; typedef typename XprType::Nested XprTypeNested; typedef typename remove_reference::type _XprTypeNested; enum { Flags = _XprTypeNested::Flags & RowMajorBit }; }; } template class CwiseUnaryOpImpl; /** \class CwiseUnaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise unary operator is applied to an expression * * \tparam UnaryOp template functor implementing the operator * \tparam XprType the type of the expression to which we are applying the unary operator * * This class represents an expression where a unary operator is applied to an expression. * It is the return type of all operations taking exactly 1 input expression, regardless of the * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix * is considered unary, because only the right-hand side is an expression, and its * return type is a specialization of CwiseUnaryOp. * * Most of the time, this is the only way that it is used, so you typically don't have to name * CwiseUnaryOp types explicitly. 
* * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp */ template class CwiseUnaryOp : public CwiseUnaryOpImpl::StorageKind>, internal::no_assignment_operator { public: typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) typedef typename internal::ref_selector::type XprTypeNested; typedef typename internal::remove_all::type NestedExpression; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) : m_xpr(xpr), m_functor(func) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); } /** \returns the functor representing the unary operation */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::remove_all::type& nestedExpression() { return m_xpr; } protected: XprTypeNested m_xpr; const UnaryOp m_functor; }; // Generic API dispatcher template class CwiseUnaryOpImpl : public internal::generic_xpr_base >::type { public: typedef typename internal::generic_xpr_base >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_OP_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/CwiseUnaryView.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_VIEW_H #define EIGEN_CWISE_UNARY_VIEW_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef typename result_of< ViewOp(const typename traits::Scalar&) >::type Scalar; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename remove_all::type _MatrixTypeNested; enum { FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions MatrixTypeInnerStride = inner_stride_at_compile_time::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic ? int(Dynamic) : int(MatrixTypeInnerStride) * int(sizeof(typename traits::Scalar) / sizeof(Scalar)), OuterStrideAtCompileTime = outer_stride_at_compile_time::ret == Dynamic ? int(Dynamic) : outer_stride_at_compile_time::ret * int(sizeof(typename traits::Scalar) / sizeof(Scalar)) }; }; } template class CwiseUnaryViewImpl; /** \class CwiseUnaryView * \ingroup Core_Module * * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector * * \tparam ViewOp template functor implementing the view * \tparam MatrixType the type of the matrix we are applying the unary operator * * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. * It is the return type of real() and imag(), and most of the time this is the only way it is used. 
* * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp */ template class CwiseUnaryView : public CwiseUnaryViewImpl::StorageKind> { public: typedef typename CwiseUnaryViewImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) typedef typename internal::ref_selector::non_const_type MatrixTypeNested; typedef typename internal::remove_all::type NestedExpression; explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp()) : m_matrix(mat), m_functor(func) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } /** \returns the functor representing unary operation */ EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC typename internal::remove_reference::type& nestedExpression() { return m_matrix; } protected: MatrixTypeNested m_matrix; ViewOp m_functor; }; // Generic API dispatcher template class CwiseUnaryViewImpl : public internal::generic_xpr_base >::type { public: typedef typename internal::generic_xpr_base >::type Base; }; template class CwiseUnaryViewImpl : public internal::dense_xpr_base< CwiseUnaryView >::type { public: typedef CwiseUnaryView Derived; typedef typename internal::dense_xpr_base< CwiseUnaryView >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { 
return derived().nestedExpression().innerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return derived().nestedExpression().outerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); } protected: EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl) }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_VIEW_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/DenseBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSEBASE_H #define EIGEN_DENSEBASE_H #include "./InternalHeaderCheck.h" namespace Eigen { // The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type. EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE) /** \class DenseBase * \ingroup Core_Module * * \brief Base class for all dense matrices, vectors, and arrays * * This class is the base that is inherited by all dense objects (matrix, vector, arrays, * and related expression types). The common Eigen API for dense objects is contained in this class. * * \tparam Derived is the derived type, e.g., a matrix type or an expression. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. 
* * \sa \blank \ref TopicClassHierarchy */ template class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN : public DenseCoeffsBase::value> #else : public DenseCoeffsBase #endif // not EIGEN_PARSED_BY_DOXYGEN { public: /** Inner iterator type to iterate over the coefficients of a row or column. * \sa class InnerIterator */ typedef Eigen::InnerIterator InnerIterator; typedef typename internal::traits::StorageKind StorageKind; /** * \brief The type used to store indices * \details This typedef is relevant for types that store multiple indices such as * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. */ typedef typename internal::traits::StorageIndex StorageIndex; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. */ typedef typename internal::traits::Scalar Scalar; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex, etc. * * It is an alias for the Scalar type */ typedef Scalar value_type; typedef typename NumTraits::Real RealScalar; typedef DenseCoeffsBase::value> Base; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::coeff; using Base::coeffByOuterInner; using Base::operator(); using Base::operator[]; using Base::x; using Base::y; using Base::z; using Base::w; using Base::stride; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename Base::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. 
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, /**< This value is equal to the maximum possible number of rows that this expression * might have. If this expression might have an arbitrarily high number of rows, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */ MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, /**< This value is equal to the maximum possible number of columns that this expression * might have. If this expression might have an arbitrarily high number of columns, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */ MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime>::ret), /**< This value is equal to the maximum possible number of coefficients that this expression * might have. 
If this expression might have an arbitrarily high number of coefficients, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */ IsVectorAtCompileTime = internal::traits::RowsAtCompileTime == 1 || internal::traits::ColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2, /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors, * and 2 for matrices. */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret }; typedef typename internal::find_best_packet::type PacketScalar; enum { IsPlainObjectBase = 0 }; /** The plain matrix type corresponding to this expression. * \sa PlainObject */ typedef Matrix::Scalar, internal::traits::RowsAtCompileTime, internal::traits::ColsAtCompileTime, AutoAlign | (internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), internal::traits::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime > PlainMatrix; /** The plain array type corresponding to this expression. * \sa PlainObject */ typedef Array::Scalar, internal::traits::RowsAtCompileTime, internal::traits::ColsAtCompileTime, AutoAlign | (internal::traits::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime > PlainArray; /** \brief The plain matrix or array type corresponding to this expression. * * This is not necessarily exactly the return type of eval(). In the case of plain matrices, * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed * that the return type of eval() is either PlainObject or const PlainObject&. */ typedef typename internal::conditional::XprKind,MatrixXpr >::value, PlainMatrix, PlainArray>::type PlainObject; /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index nonZeros() const { return size(); } /** \returns the outer size. * * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a * column-major matrix, and the number of rows for a row-major matrix. */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const { return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); } /** \returns the inner size. * * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a * column-major matrix, and the number of columns for a row-major matrix. */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const { return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? 
this->cols() : this->rows(); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ EIGEN_DEVICE_FUNC void resize(Index newSize) { EIGEN_ONLY_USED_FOR_DEBUG(newSize); eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); eigen_assert(rows == this->rows() && cols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,PlainObject> ConstantReturnType; /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */ EIGEN_DEPRECATED typedef CwiseNullaryOp,PlainObject> SequentialLinSpacedReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ typedef CwiseNullaryOp,PlainObject> RandomAccessLinSpacedReturnType; /** \internal the return type of MatrixBase::eigenvalues() */ typedef Matrix::Scalar>::Real, internal::traits::ColsAtCompileTime, 1> EigenvaluesReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** Copies \a other into *this. \returns a reference to *this. 
*/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); template EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase &other); template EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase &other); template EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase &other); template EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue& func); /** \internal * Copies \a other into *this without evaluating other. \returns a reference to *this. */ template /** \deprecated */ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase& other); EIGEN_DEVICE_FUNC CommaInitializer operator<< (const Scalar& s); template /** \deprecated it now returns \c *this */ EIGEN_DEPRECATED const Derived& flagged() const { return derived(); } template EIGEN_DEVICE_FUNC CommaInitializer operator<< (const DenseBase& other); typedef Transpose TransposeReturnType; EIGEN_DEVICE_FUNC TransposeReturnType transpose(); typedef typename internal::add_const >::type ConstTransposeReturnType; EIGEN_DEVICE_FUNC ConstTransposeReturnType transpose() const; EIGEN_DEVICE_FUNC void transposeInPlace(); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value); EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType 
LinSpaced(Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high); template EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); template EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(Index size, const CustomNullaryOp& func); template EIGEN_DEVICE_FUNC static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(); EIGEN_DEVICE_FUNC void fill(const Scalar& value); EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value); EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setZero(); EIGEN_DEVICE_FUNC Derived& setOnes(); EIGEN_DEVICE_FUNC Derived& setRandom(); template EIGEN_DEVICE_FUNC bool isApprox(const DenseBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other, const RealScalar& prec = NumTraits::dummy_precision()) const; template EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec 
= NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits::dummy_precision()) const; inline bool hasNaN() const; inline bool allFinite() const; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other); typedef typename internal::add_const_on_value_type::type>::type EvalReturnType; /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. * * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const { // Even though MSVC does not honor strong inlining when the return type // is a dynamic matrix, we desperately need strong inlining for fixed // size types on MSVC. return typename internal::eval::type(derived()); } /** swaps *this with the expression \a other. * */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(const DenseBase& other) { EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); eigen_assert(rows()==other.rows() && cols()==other.cols()); call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op()); } /** swaps *this with the matrix or array \a other. 
* */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(PlainObjectBase& other) { eigen_assert(rows()==other.rows() && cols()==other.cols()); call_assignment(derived(), other.derived(), internal::swap_assign_op()); } EIGEN_DEVICE_FUNC inline const NestByValue nestByValue() const; EIGEN_DEVICE_FUNC inline const ForceAlignedAccess forceAlignedAccess() const; EIGEN_DEVICE_FUNC inline ForceAlignedAccess forceAlignedAccess(); template EIGEN_DEVICE_FUNC inline const typename internal::conditional,Derived&>::type forceAlignedAccessIf() const; template EIGEN_DEVICE_FUNC inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); EIGEN_DEVICE_FUNC Scalar sum() const; EIGEN_DEVICE_FUNC Scalar mean() const; EIGEN_DEVICE_FUNC Scalar trace() const; EIGEN_DEVICE_FUNC Scalar prod() const; template EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff() const; template EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff() const; // By default, the fastest version with undefined NaN propagation semantics is // used. // TODO(rmlarsen): Replace with default template argument when we move to // c++11 or beyond. EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff() const { return minCoeff(); } EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff() const { return maxCoeff(); } template EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const; template EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; template EIGEN_DEVICE_FUNC typename internal::traits::Scalar minCoeff(IndexType* index) const; template EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff(IndexType* index) const; // TODO(rmlarsen): Replace these methods with a default template argument. 
template EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const { return minCoeff(row, col); } template EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const { return maxCoeff(row, col); } template EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff(IndexType* index) const { return minCoeff(index); } template EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar maxCoeff(IndexType* index) const { return maxCoeff(index); } template EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const; template EIGEN_DEVICE_FUNC void visit(Visitor& func) const; /** \returns a WithFormat proxy object allowing to print a matrix the with given * format \a fmt. * * See class IOFormat for some examples. * * \sa class IOFormat, class WithFormat */ inline const WithFormat format(const IOFormat& fmt) const { return WithFormat(derived(), fmt); } /** \returns the unique coefficient of a 1x1 expression */ EIGEN_DEVICE_FUNC CoeffReturnType value() const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeff(0,0); } EIGEN_DEVICE_FUNC bool all() const; EIGEN_DEVICE_FUNC bool any() const; EIGEN_DEVICE_FUNC Index count() const; typedef VectorwiseOp RowwiseReturnType; typedef const VectorwiseOp ConstRowwiseReturnType; typedef VectorwiseOp ColwiseReturnType; typedef const VectorwiseOp ConstColwiseReturnType; /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions * * Example: \include MatrixBase_rowwise.cpp * Output: \verbinclude MatrixBase_rowwise.out * * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); } EIGEN_DEVICE_FUNC RowwiseReturnType rowwise(); /** \returns a VectorwiseOp wrapper of *this 
broadcasting and partial reductions * * Example: \include MatrixBase_colwise.cpp * Output: \verbinclude MatrixBase_colwise.out * * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); } EIGEN_DEVICE_FUNC ColwiseReturnType colwise(); typedef CwiseNullaryOp,PlainObject> RandomReturnType; static const RandomReturnType Random(Index rows, Index cols); static const RandomReturnType Random(Index size); static const RandomReturnType Random(); template inline EIGEN_DEVICE_FUNC const Select select(const DenseBase& thenMatrix, const DenseBase& elseMatrix) const; template inline EIGEN_DEVICE_FUNC const Select select(const DenseBase& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const; template inline EIGEN_DEVICE_FUNC const Select select(const typename ElseDerived::Scalar& thenScalar, const DenseBase& elseMatrix) const; template RealScalar lpNorm() const; template EIGEN_DEVICE_FUNC const Replicate replicate() const; /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate_int_int.cpp * Output: \verbinclude MatrixBase_replicate_int_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC const Replicate replicate(Index rowFactor, Index colFactor) const { return Replicate(derived(), rowFactor, colFactor); } typedef Reverse ReverseReturnType; typedef const Reverse ConstReverseReturnType; EIGEN_DEVICE_FUNC ReverseReturnType reverse(); /** This is the const version of reverse(). */ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); } EIGEN_DEVICE_FUNC void reverseInPlace(); #ifdef EIGEN_PARSED_BY_DOXYGEN /** STL-like RandomAccessIterator * iterator type as returned by the begin() and end() methods. 
*/ typedef random_access_iterator_type iterator; /** This is the const version of iterator (aka read-only) */ typedef random_access_iterator_type const_iterator; #else typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit, internal::pointer_based_stl_iterator, internal::generic_randaccess_stl_iterator >::type iterator_type; typedef typename internal::conditional< (Flags&DirectAccessBit)==DirectAccessBit, internal::pointer_based_stl_iterator, internal::generic_randaccess_stl_iterator >::type const_iterator_type; // Stl-style iterators are supported only for vectors. typedef typename internal::conditional< IsVectorAtCompileTime, iterator_type, void >::type iterator; typedef typename internal::conditional< IsVectorAtCompileTime, const_iterator_type, void >::type const_iterator; #endif inline iterator begin(); inline const_iterator begin() const; inline const_iterator cbegin() const; inline iterator end(); inline const_iterator end() const; inline const_iterator cend() const; #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase #define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL #define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/BlockMethods.h" # include "../plugins/IndexedViewMethods.h" # include "../plugins/ReshapedMethods.h" # ifdef EIGEN_DENSEBASE_PLUGIN # include EIGEN_DENSEBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL #undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF #undef EIGEN_DOC_UNARY_ADDONS // disable the use of evalTo for dense objects with a nice compilation error template EIGEN_DEVICE_FUNC inline void evalTo(Dest& ) const { EIGEN_STATIC_ASSERT((internal::is_same::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); } protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase) /** Default constructor. Do nothing. 
*/ EIGEN_DEVICE_FUNC DenseBase() { /* Just checks for self-consistency of the flags. * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down */ #ifdef EIGEN_INTERNAL_DEBUGGING EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) #endif } private: EIGEN_DEVICE_FUNC explicit DenseBase(int); EIGEN_DEVICE_FUNC DenseBase(int,int); template EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase&); }; } // end namespace Eigen #endif // EIGEN_DENSEBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/DenseCoeffsBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSECOEFFSBASE_H #define EIGEN_DENSECOEFFSBASE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct add_const_on_value_type_if_arithmetic { typedef typename conditional::value, T, typename add_const_on_value_type::type>::type type; }; } /** \brief Base class providing read-only coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * * \note #ReadOnlyAccessors Constant indicating read-only access * * This class defines the \c operator() \c const function and friends, which can be used to read specific * entries of a matrix or array. 
*
* \sa DenseCoeffsBase, DenseCoeffsBase,
* \ref TopicClassHierarchy
*/
// NOTE(review): vendored third-party Eigen header (DenseCoeffsBase.h). This
// extracted copy is corrupted: the original line breaks were collapsed and all
// angle-bracketed template argument lists were stripped (e.g. this class was
// presumably declared as the <Derived, ReadOnlyAccessors> specialization of
// DenseCoeffsBase upstream -- TODO confirm against the upstream Eigen release).
// It cannot compile as-is; restore it from upstream rather than hand-editing.
// Below, only line structure and comments were touched; code tokens are
// unchanged from this copy.
//
// Purpose (from the surviving doc comment above): read-only coefficient
// access -- coeff(), operator()(), operator[](), x()/y()/z()/w() and the
// internal packet() accessors used by evaluators.
template class DenseCoeffsBase : public EigenBase
{
  public:

    typedef typename internal::traits::StorageKind StorageKind;
    typedef typename internal::traits::Scalar Scalar;
    typedef typename internal::packet_traits::type PacketScalar;

    // Explanation for this CoeffReturnType typedef.
    // - This is the return type of the coeff() method.
    // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
    //   to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
    // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
    //   while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is
    //   not possible, since the underlying expressions might not offer a valid address the reference could be referring to.
    typedef typename internal::conditional::Flags&LvalueBit), const Scalar&, typename internal::conditional::value, Scalar, const Scalar>::type >::type CoeffReturnType;

    typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType;

    typedef EigenBase Base;
    using Base::rows;
    using Base::cols;
    using Base::size;
    using Base::derived;

    // Maps (outer, inner) storage-order coordinates to a row index, depending
    // on compile-time vector-ness and the RowMajorBit flag.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
    {
      return int(Derived::RowsAtCompileTime) == 1 ? 0
          : int(Derived::ColsAtCompileTime) == 1 ? inner
          : int(Derived::Flags)&RowMajorBit ? outer
          : inner;
    }

    // Maps (outer, inner) storage-order coordinates to a column index (dual of
    // rowIndexByOuterInner above).
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
    {
      return int(Derived::ColsAtCompileTime) == 1 ? 0
          : int(Derived::RowsAtCompileTime) == 1 ? inner
          : int(Derived::Flags)&RowMajorBit ? inner
          : outer;
    }

    /** Short version: don't use this function, use
      * \link operator()(Index,Index) const \endlink instead.
      *
      * Long version: this function is similar to
      * \link operator()(Index,Index) const \endlink, but without the assertion.
      * Use this for limiting the performance cost of debugging code when doing
      * repeated coefficient access. Only use this when it is guaranteed that the
      * parameters \a row and \a col are in range.
      *
      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
      * function equivalent to \link operator()(Index,Index) const \endlink.
      *
      * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
    {
      eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      return internal::evaluator(derived()).coeff(row,col);
    }

    // Unchecked (outer, inner) access; forwards to coeff(row, col) after
    // mapping through the storage-order helpers above.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
    {
      return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
    }

    /** \returns the coefficient at given the given row and column.
      *
      * \sa operator()(Index,Index), operator[](Index)
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
    {
      eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      return coeff(row, col);
    }

    /** Short version: don't use this function, use
      * \link operator[](Index) const \endlink instead.
      *
      * Long version: this function is similar to
      * \link operator[](Index) const \endlink, but without the assertion.
      * Use this for limiting the performance cost of debugging code when doing
      * repeated coefficient access. Only use this when it is guaranteed that the
      * parameter \a index is in range.
      *
      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
      * function equivalent to \link operator[](Index) const \endlink.
      *
      * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
    {
      EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
      eigen_internal_assert(index >= 0 && index < size());
      return internal::evaluator(derived()).coeff(index);
    }

    /** \returns the coefficient at given index.
      *
      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
      *
      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
      * z() const, w() const
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator[](Index index) const
    {
      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
      eigen_assert(index >= 0 && index < size());
      return coeff(index);
    }

    /** \returns the coefficient at given index.
      *
      * This is synonymous to operator[](Index) const.
      *
      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
      *
      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
      * z() const, w() const
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator()(Index index) const
    {
      eigen_assert(index >= 0 && index < size());
      return coeff(index);
    }

    /** equivalent to operator[](0). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType x() const { return (*this)[0]; }

    /** equivalent to operator[](1). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType y() const
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);
      return (*this)[1];
    }

    /** equivalent to operator[](2). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType z() const
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);
      return (*this)[2];
    }

    /** equivalent to operator[](3). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType w() const
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);
      return (*this)[3];
    }

    /** \internal
      * \returns the packet of coefficients starting at the given row and column. It is your responsibility
      * to ensure that a packet really starts there. This method is only available on expressions having the
      * PacketAccessBit.
      *
      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
      * starting at an address which is a multiple of the packet size.
      */
    // NOTE(review): the template parameter list (presumably <int LoadMode>) was
    // stripped by the extraction here and on the two packet accessors below.
    template
    EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
    {
      typedef typename internal::packet_traits::type DefaultPacketType;
      eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      return internal::evaluator(derived()).template packet(row,col);
    }

    /** \internal */
    template
    EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const
    {
      return packet(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
    }

    /** \internal
      * \returns the packet of coefficients starting at the given index. It is your responsibility
      * to ensure that a packet really starts there. This method is only available on expressions having the
      * PacketAccessBit and the LinearAccessBit.
      *
      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
      * starting at an address which is a multiple of the packet size.
      */
    template
    EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
    {
      EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
      typedef typename internal::packet_traits::type DefaultPacketType;
      eigen_internal_assert(index >= 0 && index < size());
      return internal::evaluator(derived()).template packet(index);
    }

  protected:
    // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
    // But some methods are only available in the DirectAccess case.
    // So we add dummy methods here with these names, so that "using... " doesn't fail.
    // It's not private so that the child class DenseBase can access them, and it's not public
    // either since it's an implementation detail, so has to be protected.
    void coeffRef();
    void coeffRefByOuterInner();
    void writePacket();
    void writePacketByOuterInner();
    void copyCoeff();
    void copyCoeffByOuterInner();
    void copyPacket();
    void copyPacketByOuterInner();
    void stride();
    void innerStride();
    void outerStride();
    void rowStride();
    void colStride();
};

/** \brief Base class providing read/write coefficient access to matrices and arrays.
  * \ingroup Core_Module
  * \tparam Derived Type of the derived class
  *
  * \note #WriteAccessors Constant indicating read/write access
  *
  * This class defines the non-const \c operator() function and friends, which can be used to write specific
  * entries of a matrix or array. This class inherits DenseCoeffsBase which
  * defines the const variant for reading specific entries.
*
* \sa DenseCoeffsBase, \ref TopicClassHierarchy
*/
// NOTE(review): vendored Eigen code (tail of DenseCoeffsBase.h plus the start
// of DenseStorage.h's license header). As elsewhere in this extracted copy,
// all angle-bracketed template argument lists were stripped (the three class
// specializations below were presumably <Derived, WriteAccessors>,
// <Derived, DirectAccessors> and <Derived, DirectWriteAccessors> upstream --
// TODO confirm), so none of this compiles as-is; restore from the upstream
// Eigen release. Only line structure and comments were touched here; code
// tokens are unchanged from this copy.
template class DenseCoeffsBase : public DenseCoeffsBase
{
  public:

    typedef DenseCoeffsBase Base;

    typedef typename internal::traits::StorageKind StorageKind;
    typedef typename internal::traits::Scalar Scalar;
    typedef typename internal::packet_traits::type PacketScalar;
    typedef typename NumTraits::Real RealScalar;

    using Base::coeff;
    using Base::rows;
    using Base::cols;
    using Base::size;
    using Base::derived;
    using Base::rowIndexByOuterInner;
    using Base::colIndexByOuterInner;
    using Base::operator[];
    using Base::operator();
    using Base::x;
    using Base::y;
    using Base::z;
    using Base::w;

    /** Short version: don't use this function, use
      * \link operator()(Index,Index) \endlink instead.
      *
      * Long version: this function is similar to
      * \link operator()(Index,Index) \endlink, but without the assertion.
      * Use this for limiting the performance cost of debugging code when doing
      * repeated coefficient access. Only use this when it is guaranteed that the
      * parameters \a row and \a col are in range.
      *
      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
      * function equivalent to \link operator()(Index,Index) \endlink.
      *
      * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
    {
      eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      return internal::evaluator(derived()).coeffRef(row,col);
    }

    // Unchecked (outer, inner) write access; maps through the storage-order
    // helpers inherited from the read-only base.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner)
    {
      return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
    }

    /** \returns a reference to the coefficient at given the given row and column.
      *
      * \sa operator[](Index)
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index row, Index col)
    {
      eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
      return coeffRef(row, col);
    }

    /** Short version: don't use this function, use
      * \link operator[](Index) \endlink instead.
      *
      * Long version: this function is similar to
      * \link operator[](Index) \endlink, but without the assertion.
      * Use this for limiting the performance cost of debugging code when doing
      * repeated coefficient access. Only use this when it is guaranteed that the
      * parameter \a index is in range.
      *
      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
      * function equivalent to \link operator[](Index) \endlink.
      *
      * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      EIGEN_STATIC_ASSERT(internal::evaluator::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
      eigen_internal_assert(index >= 0 && index < size());
      return internal::evaluator(derived()).coeffRef(index);
    }

    /** \returns a reference to the coefficient at given index.
      *
      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
      *
      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    /** \returns a reference to the coefficient at given index.
      *
      * This is synonymous to operator[](Index).
      *
      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
      *
      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
      */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    /** equivalent to operator[](0). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& x() { return (*this)[0]; }

    /** equivalent to operator[](1). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& y()
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);
      return (*this)[1];
    }

    /** equivalent to operator[](2). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& z()
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);
      return (*this)[2];
    }

    /** equivalent to operator[](3). */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& w()
    {
      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);
      return (*this)[3];
    }
};

/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
  * \ingroup Core_Module
  * \tparam Derived Type of the derived class
  *
  * \note #DirectAccessors Constant indicating direct access
  *
  * This class defines functions to work with strides which can be used to access entries directly. This class
  * inherits DenseCoeffsBase which defines functions to access entries read-only using
  * \c operator() .
  *
  * \sa \blank \ref TopicClassHierarchy
  */
template class DenseCoeffsBase : public DenseCoeffsBase
{
  public:

    typedef DenseCoeffsBase Base;
    typedef typename internal::traits::Scalar Scalar;
    typedef typename NumTraits::Real RealScalar;

    using Base::rows;
    using Base::cols;
    using Base::size;
    using Base::derived;

    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
      *
      * \sa outerStride(), rowStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index innerStride() const
    {
      return derived().innerStride();
    }

    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
      * in a column-major matrix).
      *
      * \sa innerStride(), rowStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index outerStride() const
    {
      return derived().outerStride();
    }

    // FIXME shall we remove it ?
    EIGEN_CONSTEXPR inline Index stride() const
    {
      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
    }

    /** \returns the pointer increment between two consecutive rows.
      *
      * \sa innerStride(), outerStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index rowStride() const
    {
      return Derived::IsRowMajor ? outerStride() : innerStride();
    }

    /** \returns the pointer increment between two consecutive columns.
      *
      * \sa innerStride(), outerStride(), rowStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index colStride() const
    {
      return Derived::IsRowMajor ? innerStride() : outerStride();
    }
};

/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
  * \ingroup Core_Module
  * \tparam Derived Type of the derived class
  *
  * \note #DirectWriteAccessors Constant indicating direct access
  *
  * This class defines functions to work with strides which can be used to access entries directly. This class
  * inherits DenseCoeffsBase which defines functions to access entries read/write using
  * \c operator().
  *
  * \sa \blank \ref TopicClassHierarchy
  */
template class DenseCoeffsBase : public DenseCoeffsBase
{
  public:

    typedef DenseCoeffsBase Base;
    typedef typename internal::traits::Scalar Scalar;
    typedef typename NumTraits::Real RealScalar;

    using Base::rows;
    using Base::cols;
    using Base::size;
    using Base::derived;

    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
      *
      * \sa outerStride(), rowStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index innerStride() const EIGEN_NOEXCEPT
    {
      return derived().innerStride();
    }

    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
      * in a column-major matrix).
      *
      * \sa innerStride(), rowStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index outerStride() const EIGEN_NOEXCEPT
    {
      return derived().outerStride();
    }

    // FIXME shall we remove it ?
    EIGEN_CONSTEXPR inline Index stride() const EIGEN_NOEXCEPT
    {
      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
    }

    /** \returns the pointer increment between two consecutive rows.
      *
      * \sa innerStride(), outerStride(), colStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index rowStride() const EIGEN_NOEXCEPT
    {
      return Derived::IsRowMajor ? outerStride() : innerStride();
    }

    /** \returns the pointer increment between two consecutive columns.
      *
      * \sa innerStride(), outerStride(), rowStride()
      */
    EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
    inline Index colStride() const EIGEN_NOEXCEPT
    {
      return Derived::IsRowMajor ? innerStride() : outerStride();
    }
};

namespace internal {

// first_aligned_impl: the first specialization is the compile-time "return 0"
// case; the second computes the offset at runtime from the data pointer.
// (The boolean template parameter selecting between them was stripped by the
// extraction -- TODO confirm against upstream.)
template
struct first_aligned_impl
{
  static EIGEN_CONSTEXPR inline Index run(const Derived&) EIGEN_NOEXCEPT
  { return 0; }
};

template
struct first_aligned_impl
{
  static inline Index run(const Derived& m)
  {
    return internal::first_aligned(m.data(), m.size());
  }
};

/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect to \a Alignment for vectorization.
  *
  * \tparam Alignment requested alignment in Bytes.
  *
  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
  * documentation.
  */
template
static inline Index first_aligned(const DenseBase& m)
{
  enum { ReturnZero = (int(evaluator::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
  return first_aligned_impl::run(m.derived());
}

template
static inline Index first_default_aligned(const DenseBase& m)
{
  typedef typename Derived::Scalar Scalar;
  typedef typename packet_traits::type DefaultPacketType;
  return internal::first_aligned::alignment),Derived>(m);
}

// Compile-time inner/outer stride queries; the second specialization of each
// pair is the fallback (dynamic / unknown) case returning 0.
template::ret>
struct inner_stride_at_compile_time
{
  enum { ret = traits::InnerStrideAtCompileTime };
};

template
struct inner_stride_at_compile_time
{
  enum { ret = 0 };
};

template::ret>
struct outer_stride_at_compile_time
{
  enum { ret = traits::OuterStrideAtCompileTime };
};

template
struct outer_stride_at_compile_time
{
  enum { ret = 0 };
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_DENSECOEFFSBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/DenseStorage.h ================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud
// Copyright (C) 2006-2009 Benoit Jacob
// Copyright (C) 2010-2013 Hauke Heibel
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIXSTORAGE_H #define EIGEN_MATRIXSTORAGE_H #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN; #else #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) #endif #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { struct constructor_without_unaligned_array_assert {}; template EIGEN_DEVICE_FUNC void check_static_allocation_size() { // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit #if EIGEN_STACK_ALLOCATION_LIMIT EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); #endif } /** \internal * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * to 16 bytes boundary if the total size is a multiple of 16 bytes. */ template ::value > struct plain_array { T array[Size]; EIGEN_DEVICE_FUNC plain_array() { check_static_allocation_size(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) #elif EIGEN_GNUC_AT_LEAST(4,7) // GCC 4.7 is too aggressive in its optimizations and remove the alignment test based on the fact the array is declared to be aligned. // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900 // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined: template EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((internal::UIntPtr(eigen_unaligned_array_assert_workaround_gcc47(array)) & (sizemask)) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! 
****"); #else #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((internal::UIntPtr(array) & (sizemask)) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! ****"); #endif template struct plain_array { EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7); check_static_allocation_size(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; template struct plain_array { EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15); check_static_allocation_size(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; template struct plain_array { EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31); check_static_allocation_size(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; template struct plain_array { EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63); check_static_allocation_size(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; template struct plain_array { T array[1]; EIGEN_DEVICE_FUNC plain_array() {} EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {} }; struct plain_array_helper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void copy(const plain_array& src, const Eigen::Index size, plain_array& dst) { smart_copy(src.array, src.array + size, dst.array); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void swap(plain_array& a, const Eigen::Index a_size, plain_array& b, const Eigen::Index b_size) { if (a_size 
< b_size) { std::swap_ranges(b.array, b.array + a_size, a.array); smart_move(b.array + a_size, b.array + b_size, a.array + a_size); } else if (a_size > b_size) { std::swap_ranges(a.array, a.array + b_size, b.array); smart_move(a.array + b_size, a.array + a_size, b.array + b_size); } else { std::swap_ranges(a.array, a.array + a_size, b.array); } } }; } // end namespace internal /** \internal * * \class DenseStorage * \ingroup Core_Module * * \brief Stores the data of a matrix * * This class stores the data of fixed-size, dynamic-size or mixed matrices * in a way as compact as possible. * * \sa Matrix */ template class DenseStorage; // purely fixed-size matrix template class DenseStorage { internal::plain_array m_data; public: EIGEN_DEVICE_FUNC DenseStorage() { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) } EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()) {} #if !EIGEN_HAS_CXX11 || defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN) EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) } #else EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) = default; #endif #if !EIGEN_HAS_CXX11 EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) m_data = other.m_data; return *this; } #else EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) = default; #endif #if EIGEN_HAS_RVALUE_REFERENCES #if !EIGEN_HAS_CXX11 EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) { } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { if (this != &other) m_data = std::move(other.m_data); return *this; } #else EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&&) = default; EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&&) = default; #endif #endif EIGEN_DEVICE_FUNC 
DenseStorage(Index size, Index rows, Index cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows==Rows_ && cols==Cols_); EIGEN_UNUSED_VARIABLE(size); EIGEN_UNUSED_VARIABLE(rows); EIGEN_UNUSED_VARIABLE(cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_data, other.m_data); } EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return Rows_;} EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return Cols_;} EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // null matrix template class DenseStorage { public: EIGEN_DEVICE_FUNC DenseStorage() {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {} EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return Rows_;} EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT {return Cols_;} EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} EIGEN_DEVICE_FUNC const T *data() const { return 0; } EIGEN_DEVICE_FUNC T *data() { return 0; } }; // more specializations for null matrices; these are necessary to resolve ambiguities template class DenseStorage : public DenseStorage { }; template class DenseStorage : public DenseStorage { }; template class DenseStorage : public DenseStorage { }; // dynamic-size matrix with fixed-size storage template class DenseStorage { internal::plain_array m_data; Index m_rows; Index m_cols; public: EIGEN_DEVICE_FUNC 
DenseStorage() : m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows), m_cols(other.m_cols) { internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { m_rows = other.m_rows; m_cols = other.m_cols; internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data); } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { internal::plain_array_helper::swap(m_data, m_rows * m_cols, other.m_data, other.m_rows * other.m_cols); numext::swap(m_rows,other.m_rows); numext::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC Index rows() const {return m_rows;} EIGEN_DEVICE_FUNC Index cols() const {return m_cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed width template class DenseStorage { internal::plain_array m_data; Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows) { 
internal::plain_array_helper::copy(other.m_data, m_rows * Cols_, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { m_rows = other.m_rows; internal::plain_array_helper::copy(other.m_data, m_rows * Cols_, m_data); } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { internal::plain_array_helper::swap(m_data, m_rows * Cols_, other.m_data, other.m_rows * Cols_); numext::swap(m_rows, other.m_rows); } EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols(void) const EIGEN_NOEXCEPT {return Cols_;} EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; } EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed height template class DenseStorage { internal::plain_array m_data; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(other.m_cols) { internal::plain_array_helper::copy(other.m_data, Rows_ * m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { m_cols = other.m_cols; internal::plain_array_helper::copy(other.m_data, Rows_ * m_cols, m_data); } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { internal::plain_array_helper::swap(m_data, Rows_ * m_cols, other.m_data, Rows_ * 
other.m_cols); numext::swap(m_cols, other.m_cols); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows(void) const EIGEN_NOEXCEPT {return Rows_;} EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index, Index, Index cols) { m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index, Index, Index cols) { m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // purely dynamic matrix. template class DenseStorage { T *m_data; Index m_rows; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows), m_cols(cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto(other.m_rows*other.m_cols)) , m_rows(other.m_rows) , m_cols(other.m_cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols) internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_rows(std::move(other.m_rows)) , m_cols(std::move(other.m_cols)) { other.m_data = nullptr; other.m_rows = 0; other.m_cols = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { numext::swap(m_data, other.m_data); numext::swap(m_rows, other.m_rows); numext::swap(m_cols, 
other.m_cols); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_data,other.m_data); numext::swap(m_rows,other.m_rows); numext::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;} EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} void conservativeResize(Index size, Index rows, Index cols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols) { if(size != m_rows*m_cols) { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; // matrix with dynamic width and fixed height (so that matrix has dynamic size). 
template class DenseStorage { T *m_data; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows==Rows_ && cols >=0); EIGEN_UNUSED_VARIABLE(rows); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto(Rows_*other.m_cols)) , m_cols(other.m_cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*Rows_) internal::smart_copy(other.m_data, other.m_data+Rows_*m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_cols(std::move(other.m_cols)) { other.m_data = nullptr; other.m_cols = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { numext::swap(m_data, other.m_data); numext::swap(m_cols, other.m_cols); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, Rows_*m_cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_data,other.m_data); numext::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT {return Rows_;} EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT {return m_cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, Rows_*m_cols); m_cols = cols; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols) { if(size != 
Rows_*m_cols) { internal::conditional_aligned_delete_auto(m_data, Rows_*m_cols); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; // matrix with dynamic height and fixed width (so that matrix has dynamic size). template class DenseStorage { T *m_data; Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows>=0 && cols == Cols_); EIGEN_UNUSED_VARIABLE(cols); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto(other.m_rows*Cols_)) , m_rows(other.m_rows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*Cols_) internal::smart_copy(other.m_data, other.m_data+other.m_rows*Cols_, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_rows(std::move(other.m_rows)) { other.m_data = nullptr; other.m_rows = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { numext::swap(m_data, other.m_data); numext::swap(m_rows, other.m_rows); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, Cols_*m_rows); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) 
{ numext::swap(m_data,other.m_data); numext::swap(m_rows,other.m_rows); } EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT {return m_rows;} EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) {return Cols_;} void conservativeResize(Index size, Index rows, Index) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*Cols_); m_rows = rows; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index) { if(size != m_rows*Cols_) { internal::conditional_aligned_delete_auto(m_data, Cols_*m_rows); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_rows = rows; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; } // end namespace Eigen #endif // EIGEN_MATRIX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Diagonal.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2009 Benoit Jacob // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONAL_H #define EIGEN_DIAGONAL_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class Diagonal * \ingroup Core_Module * * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix * * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. * A positive value means a superdiagonal, a negative value means a subdiagonal. 
* You can also use DynamicIndex so the index can be set at runtime. * * The matrix is not required to be square. * * This class represents an expression of the main diagonal, or any sub/super diagonal * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the * time this is the only way it is used. * * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) */ namespace internal { template struct traits > : traits { typedef typename ref_selector::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; typedef typename MatrixType::StorageKind StorageKind; enum { RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), ColsAtCompileTime = 1, MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), MaxColsAtCompileTime = 1, MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions MatrixTypeOuterStride = outer_stride_at_compile_time::ret, InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? 
Dynamic : MatrixTypeOuterStride+1, OuterStrideAtCompileTime = 0 }; }; } template class Diagonal : public internal::dense_xpr_base< Diagonal >::type { public: enum { DiagIndex = DiagIndex_ }; typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) { eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() ); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_DEVICE_FUNC inline Index rows() const { return m_index.value()<0 ? numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return m_matrix.outerStride() + 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return 0; } typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const { return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const { return m_matrix.coeff(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); } 
EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const { return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); } EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const { return m_matrix.coeff(idx+rowOffset(), idx+colOffset()); } EIGEN_DEVICE_FUNC inline const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); } protected: typename internal::ref_selector::non_const_type m_matrix; const internal::variable_if_dynamicindex m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index absDiagIndex() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colOffset() const EIGEN_NOEXCEPT { return m_index.value()>0 ? m_index.value() : 0; } // trigger a compile-time error if someone try to call packet template typename MatrixType::PacketReturnType packet(Index) const; template typename MatrixType::PacketReturnType packet(Index,Index) const; }; /** \returns an expression of the main diagonal of the matrix \c *this * * \c *this is not required to be square. * * Example: \include MatrixBase_diagonal.cpp * Output: \verbinclude MatrixBase_diagonal.out * * \sa class Diagonal */ template EIGEN_DEVICE_FUNC inline typename MatrixBase::DiagonalReturnType MatrixBase::diagonal() { return DiagonalReturnType(derived()); } /** This is the const version of diagonal(). 
*/ template EIGEN_DEVICE_FUNC inline typename MatrixBase::ConstDiagonalReturnType MatrixBase::diagonal() const { return ConstDiagonalReturnType(derived()); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. * * Example: \include MatrixBase_diagonal_int.cpp * Output: \verbinclude MatrixBase_diagonal_int.out * * \sa MatrixBase::diagonal(), class Diagonal */ template EIGEN_DEVICE_FUNC inline typename MatrixBase::DiagonalDynamicIndexReturnType MatrixBase::diagonal(Index index) { return DiagonalDynamicIndexReturnType(derived(), index); } /** This is the const version of diagonal(Index). */ template EIGEN_DEVICE_FUNC inline typename MatrixBase::ConstDiagonalDynamicIndexReturnType MatrixBase::diagonal(Index index) const { return ConstDiagonalDynamicIndexReturnType(derived(), index); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. * * Example: \include MatrixBase_diagonal_template_int.cpp * Output: \verbinclude MatrixBase_diagonal_template_int.out * * \sa MatrixBase::diagonal(), class Diagonal */ template template EIGEN_DEVICE_FUNC inline typename MatrixBase::template DiagonalIndexReturnType::Type MatrixBase::diagonal() { return typename DiagonalIndexReturnType::Type(derived()); } /** This is the const version of diagonal(). 
*/ template template EIGEN_DEVICE_FUNC inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type MatrixBase::diagonal() const { return typename ConstDiagonalIndexReturnType::Type(derived()); } } // end namespace Eigen #endif // EIGEN_DIAGONAL_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/DiagonalMatrix.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2007-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONALMATRIX_H #define EIGEN_DIAGONALMATRIX_H #include "./InternalHeaderCheck.h" namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN template class DiagonalBase : public EigenBase { public: typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::StorageIndex StorageIndex; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, IsVectorAtCompileTime = 0, Flags = NoPreferredStorageOrderBit }; typedef Matrix DenseMatrixType; typedef DenseMatrixType DenseType; typedef DiagonalMatrix PlainObject; EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast(this); } EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast(this); } EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); } 
EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return derived().diagonal(); } EIGEN_DEVICE_FUNC inline Index rows() const { return diagonal().size(); } EIGEN_DEVICE_FUNC inline Index cols() const { return diagonal().size(); } template EIGEN_DEVICE_FUNC const Product operator*(const MatrixBase &matrix) const { return Product(derived(),matrix.derived()); } typedef DiagonalWrapper, const DiagonalVectorType> > InverseReturnType; EIGEN_DEVICE_FUNC inline const InverseReturnType inverse() const { return InverseReturnType(diagonal().cwiseInverse()); } EIGEN_DEVICE_FUNC inline const DiagonalWrapper operator*(const Scalar& scalar) const { return DiagonalWrapper(diagonal() * scalar); } EIGEN_DEVICE_FUNC friend inline const DiagonalWrapper operator*(const Scalar& scalar, const DiagonalBase& other) { return DiagonalWrapper(scalar * other.diagonal()); } template EIGEN_DEVICE_FUNC #ifdef EIGEN_PARSED_BY_DOXYGEN inline unspecified_expression_type #else inline const DiagonalWrapper #endif operator+(const DiagonalBase& other) const { return (diagonal() + other.diagonal()).asDiagonal(); } template EIGEN_DEVICE_FUNC #ifdef EIGEN_PARSED_BY_DOXYGEN inline unspecified_expression_type #else inline const DiagonalWrapper #endif operator-(const DiagonalBase& other) const { return (diagonal() - other.diagonal()).asDiagonal(); } }; #endif /** \class DiagonalMatrix * \ingroup Core_Module * * \brief Represents a diagonal matrix with its storage * * \param Scalar_ the type of coefficients * \param SizeAtCompileTime the dimension of the matrix, or Dynamic * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults * to SizeAtCompileTime. Most of the time, you do not need to specify it. 
* * \sa class DiagonalWrapper */ namespace internal { template struct traits > : traits > { typedef Matrix DiagonalVectorType; typedef DiagonalShape StorageKind; enum { Flags = LvalueBit | NoPreferredStorageOrderBit }; }; } template class DiagonalMatrix : public DiagonalBase > { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef const DiagonalMatrix& Nested; typedef Scalar_ Scalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::StorageIndex StorageIndex; #endif protected: DiagonalVectorType m_diagonal; public: /** const version of diagonal(). */ EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return m_diagonal; } /** \returns a reference to the stored vector of diagonal coefficients. */ EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return m_diagonal; } /** Default constructor without initialization */ EIGEN_DEVICE_FUNC inline DiagonalMatrix() {} /** Constructs a diagonal matrix with given dimension */ EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} /** 2D constructor. */ EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} /** 3D constructor. */ EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {} #if EIGEN_HAS_CXX11 /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients. \cpp11 * * There exists C++98 anologue constructors for fixed-size diagonal matrices having 2 or 3 coefficients. * * \warning To construct a diagonal matrix of fixed size, the number of values passed to this * constructor must match the fixed dimension of \c *this. 
* * \sa DiagonalMatrix(const Scalar&, const Scalar&) * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const ArgTypes&... args) : m_diagonal(a0, a1, a2, args...) {} /** \brief Constructs a DiagonalMatrix and initializes it by elements given by an initializer list of initializer * lists \cpp11 */ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE DiagonalMatrix(const std::initializer_list>& list) : m_diagonal(list) {} #endif // EIGEN_HAS_CXX11 /** Copy constructor. */ template EIGEN_DEVICE_FUNC inline DiagonalMatrix(const DiagonalBase& other) : m_diagonal(other.diagonal()) {} #ifndef EIGEN_PARSED_BY_DOXYGEN /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */ inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {} #endif /** generic constructor from expression of the diagonal coefficients */ template EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(const MatrixBase& other) : m_diagonal(other) {} /** Copy operator. */ template EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalBase& other) { m_diagonal = other.diagonal(); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalMatrix& other) { m_diagonal = other.diagonal(); return *this; } #endif /** Resizes to given size. */ EIGEN_DEVICE_FUNC inline void resize(Index size) { m_diagonal.resize(size); } /** Sets all coefficients to zero. */ EIGEN_DEVICE_FUNC inline void setZero() { m_diagonal.setZero(); } /** Resizes and sets all coefficients to zero. */ EIGEN_DEVICE_FUNC inline void setZero(Index size) { m_diagonal.setZero(size); } /** Sets this matrix to be the identity matrix of the current size. 
*/ EIGEN_DEVICE_FUNC inline void setIdentity() { m_diagonal.setOnes(); } /** Sets this matrix to be the identity matrix of the given size. */ EIGEN_DEVICE_FUNC inline void setIdentity(Index size) { m_diagonal.setOnes(size); } }; /** \class DiagonalWrapper * \ingroup Core_Module * * \brief Expression of a diagonal matrix * * \param DiagonalVectorType_ the type of the vector of diagonal coefficients * * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients, * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal() * and most of the time this is the only way that it is used. * * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal() */ namespace internal { template struct traits > { typedef DiagonalVectorType_ DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::StorageIndex StorageIndex; typedef DiagonalShape StorageKind; typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, Flags = (traits::Flags & LvalueBit) | NoPreferredStorageOrderBit }; }; } template class DiagonalWrapper : public DiagonalBase >, internal::no_assignment_operator { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef DiagonalVectorType_ DiagonalVectorType; typedef DiagonalWrapper Nested; #endif /** Constructor from expression of diagonal coefficients to wrap. */ EIGEN_DEVICE_FUNC explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {} /** \returns a const reference to the wrapped expression of diagonal coefficients. 
*/ EIGEN_DEVICE_FUNC const DiagonalVectorType& diagonal() const { return m_diagonal; } protected: typename DiagonalVectorType::Nested m_diagonal; }; /** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients * * \only_for_vectors * * Example: \include MatrixBase_asDiagonal.cpp * Output: \verbinclude MatrixBase_asDiagonal.out * * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() **/ template EIGEN_DEVICE_FUNC inline const DiagonalWrapper MatrixBase::asDiagonal() const { return DiagonalWrapper(derived()); } /** \returns true if *this is approximately equal to a diagonal matrix, * within the precision given by \a prec. * * Example: \include MatrixBase_isDiagonal.cpp * Output: \verbinclude MatrixBase_isDiagonal.out * * \sa asDiagonal() */ template bool MatrixBase::isDiagonal(const RealScalar& prec) const { if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); for(Index j = 0; j < cols(); ++j) { RealScalar absOnDiagonal = numext::abs(coeff(j,j)); if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; } for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < j; ++i) { if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; } return true; } namespace internal { template<> struct storage_kind_to_shape { typedef DiagonalShape Shape; }; struct Diagonal2Dense {}; template<> struct AssignmentKind { typedef Diagonal2Dense Kind; }; // Diagonal matrix to Dense assignment template< typename DstXprType, typename SrcXprType, typename Functor> struct Assignment { static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &/*func*/) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); dst.setZero(); dst.diagonal() = src.diagonal(); } static void run(DstXprType 
&dst, const SrcXprType &src, const internal::add_assign_op &/*func*/) { dst.diagonal() += src.diagonal(); } static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &/*func*/) { dst.diagonal() -= src.diagonal(); } }; } // namespace internal } // end namespace Eigen #endif // EIGEN_DIAGONALMATRIX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/DiagonalProduct.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2007-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONALPRODUCT_H #define EIGEN_DIAGONALPRODUCT_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. */ template template EIGEN_DEVICE_FUNC inline const Product MatrixBase::operator*(const DiagonalBase &a_diagonal) const { return Product(derived(),a_diagonal.derived()); } } // end namespace Eigen #endif // EIGEN_DIAGONALPRODUCT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Dot.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008, 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DOT_H #define EIGEN_DOT_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { // helper function for dot(). 
The problem is that if we put that in the body of dot(), then upon calling dot // with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE // looking at the static assertions. Thus this is a trick to get better compile errors. template struct dot_nocheck { typedef scalar_conj_product_op::Scalar,typename traits::Scalar> conj_prod; typedef typename conj_prod::result_type ResScalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase& a, const MatrixBase& b) { return a.template binaryExpr(b).sum(); } }; template struct dot_nocheck { typedef scalar_conj_product_op::Scalar,typename traits::Scalar> conj_prod; typedef typename conj_prod::result_type ResScalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase& a, const MatrixBase& b) { return a.transpose().template binaryExpr(b).sum(); } }; } // end namespace internal /** \fn MatrixBase::dot * \returns the dot product of *this with other. * * \only_for_vectors * * \note If the scalar type is complex numbers, then this function returns the hermitian * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the * second variable. 
* * \sa squaredNorm(), norm() */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ScalarBinaryOpTraits::Scalar,typename internal::traits::Scalar>::ReturnType MatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) #if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG)) typedef internal::scalar_conj_product_op func; EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); #endif eigen_assert(size() == other.size()); return internal::dot_nocheck::run(*this, other); } //---------- implementation of L2 norm and related functions ---------- /** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm. * In both cases, it consists in the sum of the square of all the matrix entries. * For vectors, this is also equals to the dot product of \c *this with itself. * * \sa dot(), norm(), lpNorm() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. * In both cases, it consists in the square root of the sum of the square of all the matrix entries. * For vectors, this is also equals to the square root of the dot product of \c *this with itself. * * \sa lpNorm(), dot(), squaredNorm() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::norm() const { return numext::sqrt(squaredNorm()); } /** \returns an expression of the quotient of \c *this by its own norm. * * \warning If the input vector is too small (i.e., this->norm()==0), * then this function returns a copy of the input. 
* * \only_for_vectors * * \sa norm(), normalize() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::PlainObject MatrixBase::normalized() const { typedef typename internal::nested_eval::type _Nested; _Nested n(derived()); RealScalar z = n.squaredNorm(); // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU if(z>RealScalar(0)) return n / numext::sqrt(z); else return n; } /** Normalizes the vector, i.e. divides it by its own norm. * * \only_for_vectors * * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged. * * \sa norm(), normalized() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase::normalize() { RealScalar z = squaredNorm(); // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU if(z>RealScalar(0)) derived() /= numext::sqrt(z); } /** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow. * * \only_for_vectors * * This method is analogue to the normalized() method, but it reduces the risk of * underflow and overflow when computing the norm. * * \warning If the input vector is too small (i.e., this->norm()==0), * then this function returns a copy of the input. * * \sa stableNorm(), stableNormalize(), normalized() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase::PlainObject MatrixBase::stableNormalized() const { typedef typename internal::nested_eval::type _Nested; _Nested n(derived()); RealScalar w = n.cwiseAbs().maxCoeff(); RealScalar z = (n/w).squaredNorm(); if(z>RealScalar(0)) return n / (numext::sqrt(z)*w); else return n; } /** Normalizes the vector while avoid underflow and overflow * * \only_for_vectors * * This method is analogue to the normalize() method, but it reduces the risk of * underflow and overflow when computing the norm. 
* * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged. * * \sa stableNorm(), stableNormalized(), normalize() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase::stableNormalize() { RealScalar w = cwiseAbs().maxCoeff(); RealScalar z = (derived()/w).squaredNorm(); if(z>RealScalar(0)) derived() /= numext::sqrt(z)*w; } //---------- implementation of other norms ---------- namespace internal { template struct lpNorm_selector { typedef typename NumTraits::Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase& m) { EIGEN_USING_STD(pow) return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); } }; template struct lpNorm_selector { EIGEN_DEVICE_FUNC static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().sum(); } }; template struct lpNorm_selector { EIGEN_DEVICE_FUNC static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.norm(); } }; template struct lpNorm_selector { typedef typename NumTraits::Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase& m) { if(Derived::SizeAtCompileTime==0 || (Derived::SizeAtCompileTime==Dynamic && m.size()==0)) return RealScalar(0); return m.cwiseAbs().maxCoeff(); } }; } // end namespace internal /** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values * of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ * norm, that is the maximum of the absolute values of the coefficients of \c *this. * * In all cases, if \c *this is empty, then the value 0 is returned. * * \note For matrices, this function does not compute the operator-norm. That is, if \c *this is a matrix, then its coefficients are interpreted as a 1D vector. 
Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink. * * \sa norm() */ template template #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_DEVICE_FUNC inline typename NumTraits::Scalar>::Real #else EIGEN_DEVICE_FUNC MatrixBase::RealScalar #endif MatrixBase::lpNorm() const { return internal::lpNorm_selector::run(*this); } //---------- implementation of isOrthogonal / isUnitary ---------- /** \returns true if *this is approximately orthogonal to \a other, * within the precision given by \a prec. * * Example: \include MatrixBase_isOrthogonal.cpp * Output: \verbinclude MatrixBase_isOrthogonal.out */ template template bool MatrixBase::isOrthogonal (const MatrixBase& other, const RealScalar& prec) const { typename internal::nested_eval::type nested(derived()); typename internal::nested_eval::type otherNested(other.derived()); return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); } /** \returns true if *this is approximately an unitary matrix, * within the precision given by \a prec. In the case where the \a Scalar * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name. * * \note This can be used to check whether a family of vectors forms an orthonormal basis. * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an * orthonormal basis. 
* * Example: \include MatrixBase_isUnitary.cpp * Output: \verbinclude MatrixBase_isUnitary.out */ template bool MatrixBase::isUnitary(const RealScalar& prec) const { typename internal::nested_eval::type self(derived()); for(Index i = 0; i < cols(); ++i) { if(!internal::isApprox(self.col(i).squaredNorm(), static_cast(1), prec)) return false; for(Index j = 0; j < i; ++j) if(!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast(1), prec)) return false; } return true; } } // end namespace Eigen #endif // EIGEN_DOT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/EigenBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EIGENBASE_H #define EIGEN_EIGENBASE_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class EigenBase * \ingroup Core_Module * * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). * * In other words, an EigenBase object is an object that can be copied into a MatrixBase. * * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc. * * Notice that this class is trivial, it is only used to disambiguate overloaded functions. * * \sa \blank \ref TopicClassHierarchy */ template struct EigenBase { // typedef typename internal::plain_matrix_type::type PlainObject; /** \brief The interface type of indices * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. * \sa StorageIndex, \ref TopicPreprocessorDirectives. 
* DEPRECATED: Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead. * Deprecation is not marked with a doxygen comment because there are too many existing usages to add the deprecation attribute. */ typedef Eigen::Index Index; // FIXME is it needed? typedef typename internal::traits::StorageKind StorageKind; /** \returns a reference to the derived object */ EIGEN_DEVICE_FUNC Derived& derived() { return *static_cast(this); } /** \returns a const reference to the derived object */ EIGEN_DEVICE_FUNC const Derived& derived() const { return *static_cast(this); } EIGEN_DEVICE_FUNC inline Derived& const_cast_derived() const { return *static_cast(const_cast(this)); } EIGEN_DEVICE_FUNC inline const Derived& const_derived() const { return *static_cast(this); } /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index size() const EIGEN_NOEXCEPT { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { derived().evalTo(dst); } /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ template EIGEN_DEVICE_FUNC inline void addTo(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. 
typename Dest::PlainObject res(rows(),cols()); evalTo(res); dst += res; } /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ template EIGEN_DEVICE_FUNC inline void subTo(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. typename Dest::PlainObject res(rows(),cols()); evalTo(res); dst -= res; } /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ template EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. dst = dst * this->derived(); } /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ template EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. dst = this->derived() * dst; } }; /*************************************************************************** * Implementation of matrix base methods ***************************************************************************/ /** \brief Copies the generic expression \a other into *this. * * \details The expression must provide a (templated) evalTo(Derived& dst) const * function which does the actual job. In practice, this allows any user to write * its own special matrix without having to modify MatrixBase * * \returns a reference to *this. 
*/ template template EIGEN_DEVICE_FUNC Derived& DenseBase::operator=(const EigenBase &other) { call_assignment(derived(), other.derived()); return derived(); } template template EIGEN_DEVICE_FUNC Derived& DenseBase::operator+=(const EigenBase &other) { call_assignment(derived(), other.derived(), internal::add_assign_op()); return derived(); } template template EIGEN_DEVICE_FUNC Derived& DenseBase::operator-=(const EigenBase &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op()); return derived(); } } // end namespace Eigen #endif // EIGEN_EIGENBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ForceAlignedAccess.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FORCEALIGNEDACCESS_H #define EIGEN_FORCEALIGNEDACCESS_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class ForceAlignedAccess * \ingroup Core_Module * * \brief Enforce aligned packet loads and stores regardless of what is requested * * \param ExpressionType the type of the object of which we are forcing aligned packet access * * This class is the return type of MatrixBase::forceAlignedAccess() * and most of the time this is the only way it is used. 
* * \sa MatrixBase::forceAlignedAccess() */ namespace internal { template struct traits > : public traits {}; } template class ForceAlignedAccess : public internal::dense_xpr_base< ForceAlignedAccess >::type { public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet(row, col); } template inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(row, col, x); } template inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(index, x); } EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } protected: const ExpressionType& 
m_expression; private: ForceAlignedAccess& operator=(const ForceAlignedAccess&); }; /** \returns an expression of *this with forced aligned access * \sa forceAlignedAccessIf(),class ForceAlignedAccess */ template inline const ForceAlignedAccess MatrixBase::forceAlignedAccess() const { return ForceAlignedAccess(derived()); } /** \returns an expression of *this with forced aligned access * \sa forceAlignedAccessIf(), class ForceAlignedAccess */ template inline ForceAlignedAccess MatrixBase::forceAlignedAccess() { return ForceAlignedAccess(derived()); } /** \returns an expression of *this with forced aligned access if \a Enable is true. * \sa forceAlignedAccess(), class ForceAlignedAccess */ template template inline typename internal::add_const_on_value_type,Derived&>::type>::type MatrixBase::forceAlignedAccessIf() const { return derived(); // FIXME This should not work but apparently is never used } /** \returns an expression of *this with forced aligned access if \a Enable is true. * \sa forceAlignedAccess(), class ForceAlignedAccess */ template template inline typename internal::conditional,Derived&>::type MatrixBase::forceAlignedAccessIf() { return derived(); // FIXME This should not work but apparently is never used } } // end namespace Eigen #endif // EIGEN_FORCEALIGNEDACCESS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Fuzzy.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_FUZZY_H #define EIGEN_FUZZY_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template::IsInteger> struct isApprox_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { typename internal::nested_eval::type nested(x); typename internal::nested_eval::type otherNested(y); return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; template struct isApprox_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) { return x.matrix() == y.matrix(); } }; template::IsInteger> struct isMuchSmallerThan_object_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum(); } }; template struct isMuchSmallerThan_object_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; template::IsInteger> struct isMuchSmallerThan_scalar_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec * y); } }; template struct isMuchSmallerThan_scalar_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; } // end namespace internal /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \note The fuzzy compares are done multiplicatively. 
Two vectors \f$ v \f$ and \f$ w \f$ * are considered to be approximately equal within precision \f$ p \f$ if * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm * L2 norm). * * \note Because of the multiplicativeness of this comparison, one can't use this function * to check whether \c *this is approximately equal to the zero matrix or vector. * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const * RealScalar&, RealScalar) instead. * * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const */ template template EIGEN_DEVICE_FUNC bool DenseBase::isApprox( const DenseBase& other, const RealScalar& prec ) const { return internal::isApprox_selector::run(derived(), other.derived(), prec); } /** \returns \c true if the norm of \c *this is much smaller than \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] * * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm * of a reference matrix of same dimensions. * * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const */ template EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan( const typename NumTraits::Real& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); } /** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. 
A vector \f$ v \f$ is * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm. * * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const */ template template EIGEN_DEVICE_FUNC bool DenseBase::isMuchSmallerThan( const DenseBase& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); } } // end namespace Eigen #endif // EIGEN_FUZZY_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/GeneralProduct.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_PRODUCT_H #define EIGEN_GENERAL_PRODUCT_H #include "./InternalHeaderCheck.h" namespace Eigen { enum { Large = 2, Small = 3 }; // Define the threshold value to fallback from the generic matrix-matrix product // implementation (heavy) to the lightweight coeff-based product one. // See generic_product_impl // in products/GeneralMatrixMatrix.h for more details. // TODO This threshold should also be used in the compile-time selector below. #ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD // This default value has been obtained on a Haswell architecture. 
#define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20 #endif namespace internal { template struct product_type_selector; template struct product_size_category { enum { #ifndef EIGEN_GPU_COMPILE_PHASE is_large = MaxSize == Dynamic || Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD || (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD), #else is_large = 0, #endif value = is_large ? Large : Size == 1 ? 1 : Small }; }; template struct product_type { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; enum { MaxRows = traits<_Lhs>::MaxRowsAtCompileTime, Rows = traits<_Lhs>::RowsAtCompileTime, MaxCols = traits<_Rhs>::MaxColsAtCompileTime, Cols = traits<_Rhs>::ColsAtCompileTime, MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime, traits<_Rhs>::MaxRowsAtCompileTime), Depth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime, traits<_Rhs>::RowsAtCompileTime) }; // the splitting into different lines of code here, introducing the _select enums and the typedef below, // is to work around an internal compiler error with gcc 4.1 and 4.2. private: enum { rows_select = product_size_category::value, cols_select = product_size_category::value, depth_select = product_size_category::value }; typedef product_type_selector selector; public: enum { value = selector::ret, ret = selector::ret }; #ifdef EIGEN_DEBUG_PRODUCT static void debug() { EIGEN_DEBUG_VAR(Rows); EIGEN_DEBUG_VAR(Cols); EIGEN_DEBUG_VAR(Depth); EIGEN_DEBUG_VAR(rows_select); EIGEN_DEBUG_VAR(cols_select); EIGEN_DEBUG_VAR(depth_select); EIGEN_DEBUG_VAR(value); } #endif }; /* The following allows to select the kind of product at compile time * based on the three dimensions of the product. * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ // FIXME I'm not sure the current mapping is the ideal one. 
template struct product_type_selector { enum { ret = OuterProduct }; }; template struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template struct product_type_selector<1, N, 1> { enum { ret = LazyCoeffBasedProductMode }; }; template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = GemvProduct }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; template<> struct product_type_selector { enum { ret = GemmProduct }; 
}; } // end namespace internal /*********************************************************************** * Implementation of Inner Vector Vector Product ***********************************************************************/ // FIXME : maybe the "inner product" could return a Scalar // instead of a 1x1 matrix ?? // Pro: more natural for the user // Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix // product ends up to a row-vector times col-vector product... To tackle this use // case, we could have a specialization for Block with: operator=(Scalar x); /*********************************************************************** * Implementation of Outer Vector Vector Product ***********************************************************************/ /*********************************************************************** * Implementation of General Matrix Vector Product ***********************************************************************/ /* According to the shape/flags of the matrix we have to distinghish 3 different cases: * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine * 3 - all other cases are handled using a simple loop along the outer-storage direction. * Therefore we need a lower level meta selector. * Furthermore, if the matrix is the rhs, then the product has to be transposed. 
*/ namespace internal { template struct gemv_dense_selector; } // end namespace internal namespace internal { template struct gemv_static_vector_if; template struct gemv_static_vector_if { EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } }; template struct gemv_static_vector_if { EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; } }; template struct gemv_static_vector_if { enum { ForceAlignment = internal::packet_traits::Vectorizable, PacketSize = internal::packet_traits::size }; #if EIGEN_MAX_STATIC_ALIGN_BYTES!=0 internal::plain_array m_data; EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } #else // Some architectures cannot align on the stack, // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. internal::plain_array m_data; EIGEN_STRONG_INLINE Scalar* data() { return ForceAlignment ? reinterpret_cast((internal::UIntPtr(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES) : m_data.array; } #endif }; // The vector is on the left => transposition template struct gemv_dense_selector { template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { Transpose destT(dest); enum { OtherStorageOrder = StorageOrder == RowMajor ? 
ColMajor : RowMajor }; gemv_dense_selector ::run(rhs.transpose(), lhs.transpose(), destT, alpha); } }; template<> struct gemv_dense_selector { template static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; typedef typename Dest::RealScalar RealScalar; typedef internal::blas_traits LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef internal::blas_traits RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef Map, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits::size)> MappedDest; ActualLhsType actualLhs = LhsBlasTraits::extract(lhs); ActualRhsType actualRhs = RhsBlasTraits::extract(rhs); ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); // make sure Dest is a compile-time vector type (bug 1166) typedef typename conditional::type ActualDest; enum { // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 // on, the other hand it is good for the cache to pack the vector anyways... 
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1), ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime!=0) }; typedef const_blas_data_mapper LhsMapper; typedef const_blas_data_mapper RhsMapper; RhsScalar compatibleAlpha = get_factor::run(actualAlpha); if(!MightCannotUseDest) { // shortcut if we are sure to be able to use dest directly, // this ease the compiler to generate cleaner and more optimzized code for most common cases general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()), RhsMapper(actualRhs.data(), actualRhs.innerStride()), dest.data(), 1, compatibleAlpha); } else { gemv_static_vector_if static_dest; const bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), evalToDest ? 
dest.data() : static_dest.data()); if(!evalToDest) { #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN Index size = dest.size(); EIGEN_DENSE_STORAGE_CTOR_PLUGIN #endif if(!alphaIsCompatible) { MappedDest(actualDestPtr, dest.size()).setZero(); compatibleAlpha = RhsScalar(1); } else MappedDest(actualDestPtr, dest.size()) = dest; } general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()), RhsMapper(actualRhs.data(), actualRhs.innerStride()), actualDestPtr, 1, compatibleAlpha); if (!evalToDest) { if(!alphaIsCompatible) dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size()); else dest = MappedDest(actualDestPtr, dest.size()); } } } }; template<> struct gemv_dense_selector { template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; typedef internal::blas_traits LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef internal::blas_traits RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all::type ActualRhsTypeCleaned; typename add_const::type actualLhs = LhsBlasTraits::extract(lhs); typename add_const::type actualRhs = RhsBlasTraits::extract(rhs); ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs); enum { // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 // on, the other hand it is good for the cache to pack the vector anyways... DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime==0 }; gemv_static_vector_if static_rhs; ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), DirectlyUseRhs ? 
const_cast(actualRhs.data()) : static_rhs.data()); if(!DirectlyUseRhs) { #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN Index size = actualRhs.size(); EIGEN_DENSE_STORAGE_CTOR_PLUGIN #endif Map(actualRhsPtr, actualRhs.size()) = actualRhs; } typedef const_blas_data_mapper LhsMapper; typedef const_blas_data_mapper RhsMapper; general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()), RhsMapper(actualRhsPtr, 1), dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166) actualAlpha); } }; template<> struct gemv_dense_selector { template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { EIGEN_STATIC_ASSERT((!nested_eval::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory, otherwise use a temp typename nested_eval::type actual_rhs(rhs); const Index size = rhs.rows(); for(Index k=0; k struct gemv_dense_selector { template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { EIGEN_STATIC_ASSERT((!nested_eval::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE); typename nested_eval::type actual_rhs(rhs); const Index rows = dest.rows(); for(Index i=0; i template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product MatrixBase::operator*(const MatrixBase &other) const { // A note regarding the function declaration: In MSVC, this function will sometimes // not be inlined since DenseStorage is an unwindable object for dynamic // matrices and product types are holding a member to store the result. // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. 
enum { ProductIsValid = Derived::ColsAtCompileTime==Dynamic || OtherDerived::RowsAtCompileTime==Dynamic || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwiseProduct(v2) EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) #ifdef EIGEN_DEBUG_PRODUCT internal::product_type::debug(); #endif return Product(derived(), other.derived()); } /** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. * * The returned product will behave like any other expressions: the coefficients of the product will be * computed once at a time as requested. This might be useful in some extremely rare cases when only * a small and no coherent fraction of the result's coefficients have to be computed. * * \warning This version of the matrix product can be much much slower. So use it only if you know * what you are doing and that you measured a true speed improvement. 
* * \sa operator*(const MatrixBase&) */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product MatrixBase::lazyProduct(const MatrixBase &other) const { enum { ProductIsValid = Derived::ColsAtCompileTime==Dynamic || OtherDerived::RowsAtCompileTime==Dynamic || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwiseProduct(v2) EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) return Product(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_PRODUCT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/GenericPacketMath.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERIC_PACKET_MATH_H #define EIGEN_GENERIC_PACKET_MATH_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { /** \internal * \file GenericPacketMath.h * * Default implementation for types not supported by the vectorization. 
* In practice these functions are provided to make easier the writing * of generic vectorized code. */ #ifndef EIGEN_DEBUG_ALIGNED_LOAD #define EIGEN_DEBUG_ALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_UNALIGNED_LOAD #define EIGEN_DEBUG_UNALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_ALIGNED_STORE #define EIGEN_DEBUG_ALIGNED_STORE #endif #ifndef EIGEN_DEBUG_UNALIGNED_STORE #define EIGEN_DEBUG_UNALIGNED_STORE #endif struct default_packet_traits { enum { HasHalfPacket = 0, HasAdd = 1, HasSub = 1, HasShift = 1, HasMul = 1, HasNegate = 1, HasAbs = 1, HasArg = 0, HasAbs2 = 1, HasAbsDiff = 0, HasMin = 1, HasMax = 1, HasConj = 1, HasSetLinear = 1, HasBlend = 0, // This flag is used to indicate whether packet comparison is supported. // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true. HasCmp = 0, HasDiv = 0, HasSqrt = 0, HasRsqrt = 0, HasExp = 0, HasExpm1 = 0, HasLog = 0, HasLog1p = 0, HasLog10 = 0, HasPow = 0, HasSin = 0, HasCos = 0, HasTan = 0, HasASin = 0, HasACos = 0, HasATan = 0, HasSinh = 0, HasCosh = 0, HasTanh = 0, HasLGamma = 0, HasDiGamma = 0, HasZeta = 0, HasPolygamma = 0, HasErf = 0, HasErfc = 0, HasNdtri = 0, HasBessel = 0, HasIGamma = 0, HasIGammaDerA = 0, HasGammaSampleDerAlpha = 0, HasIGammac = 0, HasBetaInc = 0, HasRound = 0, HasRint = 0, HasFloor = 0, HasCeil = 0, HasSign = 0 }; }; template struct packet_traits : default_packet_traits { typedef T type; typedef T half; enum { Vectorizable = 0, size = 1, AlignedOnScalar = 0, HasHalfPacket = 0 }; enum { HasAdd = 0, HasSub = 0, HasMul = 0, HasNegate = 0, HasAbs = 0, HasAbs2 = 0, HasMin = 0, HasMax = 0, HasConj = 0, HasSetLinear = 0 }; }; template struct packet_traits : packet_traits { }; template struct unpacket_traits { typedef T type; typedef T half; enum { size = 1, alignment = 1, vectorizable = false, masked_load_available=false, masked_store_available=false }; }; template struct unpacket_traits : unpacket_traits { }; template struct type_casting_traits { enum { VectorizedCast = 0, SrcCoeffRatio = 1, 
TgtCoeffRatio = 1 }; }; /** \internal Wrapper to ensure that multiple packet types can map to the same same underlying vector type. */ template struct eigen_packet_wrapper { EIGEN_ALWAYS_INLINE operator T&() { return m_val; } EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; } EIGEN_ALWAYS_INLINE eigen_packet_wrapper() = default; EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {} EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) { m_val = v; return *this; } T m_val; }; /** \internal A convenience utility for determining if the type is a scalar. * This is used to enable some generic packet implementations. */ template struct is_scalar { typedef typename unpacket_traits::type Scalar; enum { value = internal::is_same::value }; }; /** \internal \returns static_cast(a) (coeff-wise) */ template EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a) { return static_cast(a); } template EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& /*b*/) { return static_cast(a); } template EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) { return static_cast(a); } template EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/, const SrcPacket& /*e*/, const SrcPacket& /*f*/, const SrcPacket& /*g*/, const SrcPacket& /*h*/) { return static_cast(a); } /** \internal \returns reinterpret_cast(a) */ template EIGEN_DEVICE_FUNC inline Target preinterpret(const Packet& a); /* { return reinterpret_cast(a); } */ /** \internal \returns a + b (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet padd(const Packet& a, const Packet& b) { return a+b; } // Avoid compiler warning for boolean algebra. 
template<> EIGEN_DEVICE_FUNC inline bool padd(const bool& a, const bool& b) { return a || b; } /** \internal \returns a - b (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet psub(const Packet& a, const Packet& b) { return a-b; } /** \internal \returns -a (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pnegate(const Packet& a) { return -a; } template<> EIGEN_DEVICE_FUNC inline bool pnegate(const bool& a) { return !a; } /** \internal \returns conj(a) (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pconj(const Packet& a) { return numext::conj(a); } /** \internal \returns a * b (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pmul(const Packet& a, const Packet& b) { return a*b; } // Avoid compiler warning for boolean algebra. template<> EIGEN_DEVICE_FUNC inline bool pmul(const bool& a, const bool& b) { return a && b; } /** \internal \returns a / b (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pdiv(const Packet& a, const Packet& b) { return a/b; } // In the generic case, memset to all one bits. template struct ptrue_impl { static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/){ Packet b; memset(static_cast(&b), 0xff, sizeof(Packet)); return b; } }; // For non-trivial scalars, set to Scalar(1) (i.e. a non-zero value). // Although this is technically not a valid bitmask, the scalar path for pselect // uses a comparison to zero, so this should still work in most cases. We don't // have another option, since the scalar type requires initialization. template struct ptrue_impl::value && NumTraits::RequireInitialization>::type > { static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/){ return T(1); } }; /** \internal \returns one bits. */ template EIGEN_DEVICE_FUNC inline Packet ptrue(const Packet& a) { return ptrue_impl::run(a); } // In the general case, memset to zero. 
template struct pzero_impl { static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) { Packet b; memset(static_cast(&b), 0x00, sizeof(Packet)); return b; } }; // For scalars, explicitly set to Scalar(0), since the underlying representation // for zero may not consist of all-zero bits. template struct pzero_impl::value>::type> { static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { return T(0); } }; /** \internal \returns packet of zeros */ template EIGEN_DEVICE_FUNC inline Packet pzero(const Packet& a) { return pzero_impl::run(a); } /** \internal \returns a <= b as a bit mask */ template EIGEN_DEVICE_FUNC inline Packet pcmp_le(const Packet& a, const Packet& b) { return a<=b ? ptrue(a) : pzero(a); } /** \internal \returns a < b as a bit mask */ template EIGEN_DEVICE_FUNC inline Packet pcmp_lt(const Packet& a, const Packet& b) { return a EIGEN_DEVICE_FUNC inline Packet pcmp_eq(const Packet& a, const Packet& b) { return a==b ? ptrue(a) : pzero(a); } /** \internal \returns a < b or a==NaN or b==NaN as a bit mask */ template EIGEN_DEVICE_FUNC inline Packet pcmp_lt_or_nan(const Packet& a, const Packet& b) { return a>=b ? pzero(a) : ptrue(a); } template struct bit_and { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a & b; } }; template struct bit_or { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a | b; } }; template struct bit_xor { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a ^ b; } }; template struct bit_not { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a) const { return ~a; } }; // Use operators &, |, ^, ~. 
template struct operator_bitwise_helper { EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return bit_and()(a, b); } EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return bit_or()(a, b); } EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return bit_xor()(a, b); } EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return bit_not()(a); } }; // Apply binary operations byte-by-byte template struct bytewise_bitwise_helper { EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return binary(a, b, bit_and()); } EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return binary(a, b, bit_or()); } EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return binary(a, b, bit_xor()); } EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return unary(a,bit_not()); } private: template EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) { const unsigned char* a_ptr = reinterpret_cast(&a); T c; unsigned char* c_ptr = reinterpret_cast(&c); for (size_t i = 0; i < sizeof(T); ++i) { *c_ptr++ = op(*a_ptr++); } return c; } template EIGEN_DEVICE_FUNC static inline T binary(const T& a, const T& b, Op op) { const unsigned char* a_ptr = reinterpret_cast(&a); const unsigned char* b_ptr = reinterpret_cast(&b); T c; unsigned char* c_ptr = reinterpret_cast(&c); for (size_t i = 0; i < sizeof(T); ++i) { *c_ptr++ = op(*a_ptr++, *b_ptr++); } return c; } }; // In the general case, use byte-by-byte manipulation. template struct bitwise_helper : public bytewise_bitwise_helper {}; // For integers or non-trivial scalars, use binary operators. 
template struct bitwise_helper::value && (NumTraits::IsInteger || NumTraits::RequireInitialization)>::type > : public operator_bitwise_helper {}; /** \internal \returns the bitwise and of \a a and \a b */ template EIGEN_DEVICE_FUNC inline Packet pand(const Packet& a, const Packet& b) { return bitwise_helper::bitwise_and(a, b); } /** \internal \returns the bitwise or of \a a and \a b */ template EIGEN_DEVICE_FUNC inline Packet por(const Packet& a, const Packet& b) { return bitwise_helper::bitwise_or(a, b); } /** \internal \returns the bitwise xor of \a a and \a b */ template EIGEN_DEVICE_FUNC inline Packet pxor(const Packet& a, const Packet& b) { return bitwise_helper::bitwise_xor(a, b); } /** \internal \returns the bitwise not of \a a */ template EIGEN_DEVICE_FUNC inline Packet pnot(const Packet& a) { return bitwise_helper::bitwise_not(a); } /** \internal \returns the bitwise and of \a a and not \a b */ template EIGEN_DEVICE_FUNC inline Packet pandnot(const Packet& a, const Packet& b) { return pand(a, pnot(b)); } // In the general case, use bitwise select. template struct pselect_impl { static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { return por(pand(a,mask),pandnot(b,mask)); } }; // For scalars, use ternary select. template struct pselect_impl::value>::type > { static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) { return numext::equal_strict(mask, Packet(0)) ? b : a; } }; /** \internal \returns \a or \b for each field in packet according to \mask */ template EIGEN_DEVICE_FUNC inline Packet pselect(const Packet& mask, const Packet& a, const Packet& b) { return pselect_impl::run(mask, a, b); } template<> EIGEN_DEVICE_FUNC inline bool pselect( const bool& cond, const bool& a, const bool& b) { return cond ? a : b; } /** \internal \returns the min or of \a a and \a b (coeff-wise) If either \a a or \a b are NaN, the result is implementation defined. 
*/ template struct pminmax_impl { template static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { return op(a,b); } }; /** \internal \returns the min or max of \a a and \a b (coeff-wise) If either \a a or \a b are NaN, NaN is returned. */ template<> struct pminmax_impl { template static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { Packet not_nan_mask_a = pcmp_eq(a, a); Packet not_nan_mask_b = pcmp_eq(b, b); return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), b), a); } }; /** \internal \returns the min or max of \a a and \a b (coeff-wise) If both \a a and \a b are NaN, NaN is returned. Equivalent to std::fmin(a, b). */ template<> struct pminmax_impl { template static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) { Packet not_nan_mask_a = pcmp_eq(a, a); Packet not_nan_mask_b = pcmp_eq(b, b); return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), a), b); } }; #ifndef SYCL_DEVICE_ONLY #define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) Func #else #define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) \ [](const Type& a, const Type& b) { \ return Func(a, b);} #endif /** \internal \returns the min of \a a and \a b (coeff-wise). If \a a or \b b is NaN, the return value is implementation defined. */ template EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { return numext::mini(a,b); } /** \internal \returns the min of \a a and \a b (coeff-wise). NaNPropagation determines the NaN propagation semantics. */ template EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin))); } /** \internal \returns the max of \a a and \a b (coeff-wise) If \a a or \b b is NaN, the return value is implementation defined. 
*/ template EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { return numext::maxi(a, b); } /** \internal \returns the max of \a a and \a b (coeff-wise). NaNPropagation determines the NaN propagation semantics. */ template EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { return pminmax_impl::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet,(pmax))); } /** \internal \returns the absolute value of \a a */ template EIGEN_DEVICE_FUNC inline Packet pabs(const Packet& a) { return numext::abs(a); } template<> EIGEN_DEVICE_FUNC inline unsigned int pabs(const unsigned int& a) { return a; } template<> EIGEN_DEVICE_FUNC inline unsigned long pabs(const unsigned long& a) { return a; } template<> EIGEN_DEVICE_FUNC inline unsigned long long pabs(const unsigned long long& a) { return a; } /** \internal \returns the addsub value of \a a,b */ template EIGEN_DEVICE_FUNC inline Packet paddsub(const Packet& a, const Packet& b) { return pselect(peven_mask(a), padd(a, b), psub(a, b)); } /** \internal \returns the phase angle of \a a */ template EIGEN_DEVICE_FUNC inline Packet parg(const Packet& a) { using numext::arg; return arg(a); } /** \internal \returns \a a logically shifted by N bits to the right */ template EIGEN_DEVICE_FUNC inline int parithmetic_shift_right(const int& a) { return a >> N; } template EIGEN_DEVICE_FUNC inline long int parithmetic_shift_right(const long int& a) { return a >> N; } /** \internal \returns \a a arithmetically shifted by N bits to the right */ template EIGEN_DEVICE_FUNC inline int plogical_shift_right(const int& a) { return static_cast(static_cast(a) >> N); } template EIGEN_DEVICE_FUNC inline long int plogical_shift_right(const long int& a) { return static_cast(static_cast(a) >> N); } /** \internal \returns \a a shifted by N bits to the left */ template EIGEN_DEVICE_FUNC inline int plogical_shift_left(const int& a) { return a << N; } template EIGEN_DEVICE_FUNC inline long int plogical_shift_left(const long 
int& a) { return a << N; } /** \internal \returns the significant and exponent of the underlying floating point numbers * See https://en.cppreference.com/w/cpp/numeric/math/frexp */ template EIGEN_DEVICE_FUNC inline Packet pfrexp(const Packet& a, Packet& exponent) { int exp; EIGEN_USING_STD(frexp); Packet result = static_cast(frexp(a, &exp)); exponent = static_cast(exp); return result; } /** \internal \returns a * 2^((int)exponent) * See https://en.cppreference.com/w/cpp/numeric/math/ldexp */ template EIGEN_DEVICE_FUNC inline Packet pldexp(const Packet &a, const Packet &exponent) { EIGEN_USING_STD(ldexp) return static_cast(ldexp(a, static_cast(exponent))); } /** \internal \returns the min of \a a and \a b (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pabsdiff(const Packet& a, const Packet& b) { return pselect(pcmp_lt(a, b), psub(b, a), psub(a, b)); } /** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ template EIGEN_DEVICE_FUNC inline Packet pload(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet version of \a *from, (un-aligned load) */ template EIGEN_DEVICE_FUNC inline Packet ploadu(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet version of \a *from, (un-aligned masked load) * There is no generic implementation. We only have implementations for specialized * cases. Generic case should not be called. 
*/ template EIGEN_DEVICE_FUNC inline typename enable_if::masked_load_available, Packet>::type ploadu(const typename unpacket_traits::type* from, typename unpacket_traits::mask_t umask); /** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ template EIGEN_DEVICE_FUNC inline Packet pset1(const typename unpacket_traits::type& a) { return a; } /** \internal \returns a packet with constant coefficients set from bits */ template EIGEN_DEVICE_FUNC inline Packet pset1frombits(BitsType a); /** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */ template EIGEN_DEVICE_FUNC inline Packet pload1(const typename unpacket_traits::type *a) { return pset1(*a); } /** \internal \returns a packet with elements of \a *from duplicated. * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} * Currently, this function is only used for scalar * complex products. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ploaddup(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet with elements of \a *from quadrupled. * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} * Currently, this function is only used in matrix products. 
* For packet-size smaller or equal to 4, this function is equivalent to pload1 */ template EIGEN_DEVICE_FUNC inline Packet ploadquad(const typename unpacket_traits::type* from) { return pload1(from); } /** \internal equivalent to * \code * a0 = pload1(a+0); * a1 = pload1(a+1); * a2 = pload1(a+2); * a3 = pload1(a+3); * \endcode * \sa pset1, pload1, ploaddup, pbroadcast2 */ template EIGEN_DEVICE_FUNC inline void pbroadcast4(const typename unpacket_traits::type *a, Packet& a0, Packet& a1, Packet& a2, Packet& a3) { a0 = pload1(a+0); a1 = pload1(a+1); a2 = pload1(a+2); a3 = pload1(a+3); } /** \internal equivalent to * \code * a0 = pload1(a+0); * a1 = pload1(a+1); * \endcode * \sa pset1, pload1, ploaddup, pbroadcast4 */ template EIGEN_DEVICE_FUNC inline void pbroadcast2(const typename unpacket_traits::type *a, Packet& a0, Packet& a1) { a0 = pload1(a+0); a1 = pload1(a+1); } /** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet plset(const typename unpacket_traits::type& a) { return a; } /** \internal \returns a packet with constant coefficients \a a, e.g.: (x, 0, x, 0), where x is the value of all 1-bits. */ template EIGEN_DEVICE_FUNC inline Packet peven_mask(const Packet& /*a*/) { typedef typename unpacket_traits::type Scalar; const size_t n = unpacket_traits::size; EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n]; for(size_t i = 0; i < n; ++i) { memset(elements+i, ((i & 1) == 0 ? 
0xff : 0), sizeof(Scalar)); } return ploadu(elements); } /** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ template EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from) { (*to) = from; } /** \internal copy the packet \a from to \a *to, (un-aligned store) */ template EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) { (*to) = from; } /** \internal copy the packet \a from to \a *to, (un-aligned store with a mask) * There is no generic implementation. We only have implementations for specialized * cases. Generic case should not be called. */ template EIGEN_DEVICE_FUNC inline typename enable_if::masked_store_available, void>::type pstoreu(Scalar* to, const Packet& from, typename unpacket_traits::mask_t umask); template EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) { return ploadu(from); } template EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) { pstore(to, from); } /** \internal tries to do cache prefetching of \a addr */ template EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr) { #if defined(EIGEN_HIP_DEVICE_COMPILE) // do nothing #elif defined(EIGEN_CUDA_ARCH) #if defined(__LP64__) || EIGEN_OS_WIN64 // 64-bit pointer operand constraint for inlined asm asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr)); #else // 32-bit pointer operand constraint for inlined asm asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr)); #endif #elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC) __builtin_prefetch(addr); #endif } /** \internal \returns the reversed elements of \a a*/ template EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a) { return a; } /** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ template EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a) { return Packet(numext::imag(a),numext::real(a)); } /************************** * Special 
math functions ***************************/ /** \internal \returns the sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin(const Packet& a) { EIGEN_USING_STD(sin); return sin(a); } /** \internal \returns the cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos(const Packet& a) { EIGEN_USING_STD(cos); return cos(a); } /** \internal \returns the tan of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan(const Packet& a) { EIGEN_USING_STD(tan); return tan(a); } /** \internal \returns the arc sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin(const Packet& a) { EIGEN_USING_STD(asin); return asin(a); } /** \internal \returns the arc cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos(const Packet& a) { EIGEN_USING_STD(acos); return acos(a); } /** \internal \returns the arc tangent of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patan(const Packet& a) { EIGEN_USING_STD(atan); return atan(a); } /** \internal \returns the hyperbolic sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psinh(const Packet& a) { EIGEN_USING_STD(sinh); return sinh(a); } /** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcosh(const Packet& a) { EIGEN_USING_STD(cosh); return cosh(a); } /** \internal \returns the hyperbolic tan of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptanh(const Packet& a) { EIGEN_USING_STD(tanh); return tanh(a); } /** \internal \returns the exp of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp(const Packet& a) { EIGEN_USING_STD(exp); return exp(a); } /** 
\internal \returns the expm1 of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexpm1(const Packet& a) { return numext::expm1(a); } /** \internal \returns the log of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog(const Packet& a) { EIGEN_USING_STD(log); return log(a); } /** \internal \returns the log1p of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog1p(const Packet& a) { return numext::log1p(a); } /** \internal \returns the log10 of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog10(const Packet& a) { EIGEN_USING_STD(log10); return log10(a); } /** \internal \returns the log10 of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog2(const Packet& a) { typedef typename internal::unpacket_traits::type Scalar; return pmul(pset1(Scalar(EIGEN_LOG2E)), plog(a)); } /** \internal \returns the square-root of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet& a) { return numext::sqrt(a); } /** \internal \returns the reciprocal square-root of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet prsqrt(const Packet& a) { typedef typename internal::unpacket_traits::type Scalar; return pdiv(pset1(Scalar(1)), psqrt(a)); } /** \internal \returns the rounded value of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pround(const Packet& a) { using numext::round; return round(a); } /** \internal \returns the floor of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pfloor(const Packet& a) { using numext::floor; return floor(a); } /** \internal \returns the rounded value of \a a (coeff-wise) with current * rounding mode */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS 
Packet print(const Packet& a) { using numext::rint; return rint(a); } /** \internal \returns the ceil of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); } /** \internal \returns the first element of a packet */ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type pfirst(const Packet& a) { return a; } /** \internal \returns the sum of the elements of upper and lower half of \a a if \a a is larger than 4. * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7} * For packet-size smaller or equal to 4, this boils down to a noop. */ template EIGEN_DEVICE_FUNC inline typename conditional<(unpacket_traits::size%8)==0,typename unpacket_traits::half,Packet>::type predux_half_dowto4(const Packet& a) { return a; } // Slow generic implementation of Packet reduction. template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_helper(const Packet& a, Op op) { typedef typename unpacket_traits::type Scalar; const size_t n = unpacket_traits::size; EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n]; pstoreu(elements, a); for(size_t k = n / 2; k > 0; k /= 2) { for(size_t i = 0; i < k; ++i) { elements[i] = op(elements[i], elements[i + k]); } } return elements[0]; } /** \internal \returns the sum of the elements of \a a*/ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux(const Packet& a) { return a; } /** \internal \returns the product of the elements of \a a */ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_mul( const Packet& a) { typedef typename unpacket_traits::type Scalar; return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul))); } /** \internal \returns the min of the elements of \a a */ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_min( const Packet &a) { typedef typename unpacket_traits::type Scalar; return 
predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin))); } template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_min( const Packet& a) { typedef typename unpacket_traits::type Scalar; return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin))); } /** \internal \returns the min of the elements of \a a */ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_max( const Packet &a) { typedef typename unpacket_traits::type Scalar; return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax))); } template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_max( const Packet& a) { typedef typename unpacket_traits::type Scalar; return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax))); } #undef EIGEN_BINARY_OP_NAN_PROPAGATION /** \internal \returns true if all coeffs of \a a means "true" * It is supposed to be called on values returned by pcmp_*. */ // not needed yet // template EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a) // { return bool(a); } /** \internal \returns true if any coeffs of \a a means "true" * It is supposed to be called on values returned by pcmp_*. */ template EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a) { // Dirty but generic implementation where "true" is assumed to be non 0 and all the sames. // It is expected that "true" is either: // - Scalar(1) // - bits full of ones (NaN for floats), // - or first bit equals to 1 (1 for ints, smallest denormal for floats). // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars. 
typedef typename unpacket_traits::type Scalar; return numext::not_equal_strict(predux(a), Scalar(0)); } /*************************************************************************** * The following functions might not have to be overwritten for vectorized types ***************************************************************************/ /** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */ // NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) template inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) { pstore(to, pset1(a)); } /** \internal \returns a * b + c (coeff-wise) */ template EIGEN_DEVICE_FUNC inline Packet pmadd(const Packet& a, const Packet& b, const Packet& c) { return padd(pmul(a, b),c); } /** \internal \returns a packet version of \a *from. * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits::type* from) { if(Alignment >= unpacket_traits::alignment) return pload(from); else return ploadu(from); } /** \internal copy the packet \a from to \a *to. * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from) { if(Alignment >= unpacket_traits::alignment) pstore(to, from); else pstoreu(to, from); } /** \internal \returns a packet version of \a *from. * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the * hardware if available to speedup the loading of data that won't be modified * by the current computation. 
*/ template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits::type* from) { return ploadt(from); } /*************************************************************************** * Fast complex products (GCC generates a function call which is very slow) ***************************************************************************/ // Eigen+CUDA does not support complexes. #if !defined(EIGEN_GPUCC) template<> inline std::complex pmul(const std::complex& a, const std::complex& b) { return std::complex(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } template<> inline std::complex pmul(const std::complex& a, const std::complex& b) { return std::complex(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } #endif /*************************************************************************** * PacketBlock, that is a collection of N packets where the number of words * in the packet is a multiple of N. ***************************************************************************/ template ::size> struct PacketBlock { Packet packet[N]; }; template EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& /*kernel*/) { // Nothing to do in the scalar case, i.e. a 1x1 matrix. } /*************************************************************************** * Selector, i.e. vector of N boolean values used to select (i.e. blend) * words from 2 packets. ***************************************************************************/ template struct Selector { bool select[N]; }; template EIGEN_DEVICE_FUNC inline Packet pblend(const Selector::size>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) { return ifPacket.select[0] ? 
thenPacket : elsePacket; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERIC_PACKET_MATH_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/GlobalFunctions.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2016 Gael Guennebaud // Copyright (C) 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GLOBAL_FUNCTIONS_H #define EIGEN_GLOBAL_FUNCTIONS_H #ifdef EIGEN_PARSED_BY_DOXYGEN #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ /** \returns an expression of the coefficient-wise DOC_OP of \a x DOC_DETAILS \sa Math functions, class CwiseUnaryOp */ \ template \ inline const Eigen::CwiseUnaryOp, const Derived> \ NAME(const Eigen::ArrayBase& x); #else #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ template \ inline const Eigen::CwiseUnaryOp, const Derived> \ (NAME)(const Eigen::ArrayBase& x) { \ return Eigen::CwiseUnaryOp, const Derived>(x.derived()); \ } #endif // EIGEN_PARSED_BY_DOXYGEN #define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \ \ template \ struct NAME##_retval > \ { \ typedef const Eigen::CwiseUnaryOp, const Derived> type; \ }; \ template \ struct NAME##_impl > \ { \ static inline typename NAME##_retval >::type run(const Eigen::ArrayBase& x) \ { \ return typename NAME##_retval >::type(x.derived()); \ } \ }; #include "./InternalHeaderCheck.h" namespace Eigen { EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op,real part,\sa ArrayBase::real) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op,imaginary part,\sa ArrayBase::imag) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op,complex conjugate,\sa 
ArrayBase::conjugate) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse,scalar_inverse_op,inverse,\sa ArrayBase::inverse) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op,sine,\sa ArrayBase::sin) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op,cosine,\sa ArrayBase::cos) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op,tangent,\sa ArrayBase::tan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan,scalar_atan_op,arc-tangent,\sa ArrayBase::atan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op,arc-sine,\sa ArrayBase::asin) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op,arc-consine,\sa ArrayBase::acos) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh) #if EIGEN_HAS_CXX11_MATH EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh,scalar_asinh_op,inverse hyperbolic sine,\sa ArrayBase::asinh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh,scalar_acosh_op,inverse hyperbolic cosine,\sa ArrayBase::acosh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh,scalar_atanh_op,inverse hyperbolic tangent,\sa ArrayBase::atanh) #endif EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic,scalar_logistic_op,logistic function,\sa ArrayBase::logistic) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\sa ArrayBase::erf) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc,scalar_erfc_op,complement error function,\sa ArrayBase::erfc) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri,scalar_ndtri_op,inverse normal distribution function,\sa ArrayBase::ndtri) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op,exponential,\sa ArrayBase::exp) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1,scalar_expm1_op,exponential of a 
value minus 1,\sa ArrayBase::expm1) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op,natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p,scalar_log1p_op,natural logarithm of 1 plus the value,\sa ArrayBase::log1p) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2,scalar_log2_op,base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op,absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2,scalar_abs2_op,squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op,square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt,scalar_rsqrt_op,reciprocal square root,\sa ArrayBase::rsqrt) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square,scalar_square_op,square (power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube,scalar_cube_op,cube (power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint,scalar_rint_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round,scalar_round_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(floor,scalar_floor_op,nearest integer not greater than the giben value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ceil,scalar_ceil_op,nearest integer not less than the giben value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isnan,scalar_isnan_op,not-a-number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA 
ArrayBase::isnan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\sa ArrayBase::sign) /** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent. * * \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression (\c Derived::Scalar). * * \sa ArrayBase::pow() * * \relates ArrayBase */ #ifdef EIGEN_PARSED_BY_DOXYGEN template inline const CwiseBinaryOp,Derived,Constant > pow(const Eigen::ArrayBase& x, const ScalarExponent& exponent); #else template EIGEN_DEVICE_FUNC inline EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE( const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg::type,pow)) pow(const Eigen::ArrayBase& x, const ScalarExponent& exponent) { typedef typename internal::promote_scalar_arg::type PromotedExponent; return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedExponent,pow)(x.derived(), typename internal::plain_constant_type::type(x.derived().rows(), x.derived().cols(), internal::scalar_constant_op(exponent))); } #endif /** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents. * * This function computes the coefficient-wise power. 
* * Example: \include Cwise_array_power_array.cpp * Output: \verbinclude Cwise_array_power_array.out * * \sa ArrayBase::pow() * * \relates ArrayBase */ template inline const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> pow(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) { return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( x.derived(), exponents.derived() ); } /** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents. * * This function computes the coefficient-wise power between a scalar and an array of exponents. * * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression (\c Derived::Scalar). * * Example: \include Cwise_scalar_power_array.cpp * Output: \verbinclude Cwise_scalar_power_array.out * * \sa ArrayBase::pow() * * \relates ArrayBase */ #ifdef EIGEN_PARSED_BY_DOXYGEN template inline const CwiseBinaryOp,Constant,Derived> pow(const Scalar& x,const Eigen::ArrayBase& x); #else template EIGEN_DEVICE_FUNC inline EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE( const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg::type,Derived,pow)) pow(const Scalar& x, const Eigen::ArrayBase& exponents) { typedef typename internal::promote_scalar_arg::type PromotedScalar; return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar,Derived,pow)( typename internal::plain_constant_type::type(exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op(x)), exponents.derived()); } #endif namespace internal { EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) } } // TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...) 
#endif // EIGEN_GLOBAL_FUNCTIONS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/IO.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_IO_H #define EIGEN_IO_H #include "./InternalHeaderCheck.h" namespace Eigen { enum { DontAlignCols = 1 }; enum { StreamPrecision = -1, FullPrecision = -2 }; namespace internal { template std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt); } /** \class IOFormat * \ingroup Core_Module * * \brief Stores a set of parameters controlling the way matrices are printed * * List of available parameters: * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision. * The default is the special value \c StreamPrecision which means to use the * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point * type. * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c DontAlignCols which * allows to disable the alignment of columns, resulting in faster code. 
* - \b coeffSeparator string printed between two coefficients of the same row * - \b rowSeparator string printed between two rows * - \b rowPrefix string printed at the beginning of each row * - \b rowSuffix string printed at the end of each row * - \b matPrefix string printed at the beginning of the matrix * - \b matSuffix string printed at the end of the matrix * - \b fill character printed to fill the empty space in aligned columns * * Example: \include IOFormat.cpp * Output: \verbinclude IOFormat.out * * \sa DenseBase::format(), class WithFormat */ struct IOFormat { /** Default constructor, see class IOFormat for the meaning of the parameters */ IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ", const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", const std::string& _matPrefix="", const std::string& _matSuffix="", const char _fill=' ') : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator), rowSpacer(""), coeffSeparator(_coeffSeparator), fill(_fill), precision(_precision), flags(_flags) { // TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline // don't add rowSpacer if columns are not to be aligned if((flags & DontAlignCols)) return; int i = int(matSuffix.length())-1; while (i>=0 && matSuffix[i]!='\n') { rowSpacer += ' '; i--; } } std::string matPrefix, matSuffix; std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; std::string coeffSeparator; char fill; int precision; int flags; }; /** \class WithFormat * \ingroup Core_Module * * \brief Pseudo expression providing matrix output with given format * * \tparam ExpressionType the type of the object on which IO stream operations are performed * * This class represents an expression with stream operators controlled by a given IOFormat. 
* It is the return type of DenseBase::format() * and most of the time this is the only way it is used. * * See class IOFormat for some examples. * * \sa DenseBase::format(), class IOFormat */ template class WithFormat { public: WithFormat(const ExpressionType& matrix, const IOFormat& format) : m_matrix(matrix), m_format(format) {} friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) { return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); } protected: typename ExpressionType::Nested m_matrix; IOFormat m_format; }; namespace internal { // NOTE: This helper is kept for backward compatibility with previous code specializing // this internal::significant_decimals_impl structure. In the future we should directly // call digits10() which has been introduced in July 2016 in 3.3. template struct significant_decimals_impl { static inline int run() { return NumTraits::digits10(); } }; /** \internal * print the matrix \a _m to the output stream \a s using the output format \a fmt */ template std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) { using internal::is_same; using internal::conditional; if(_m.size() == 0) { s << fmt.matPrefix << fmt.matSuffix; return s; } typename Derived::Nested m = _m; typedef typename Derived::Scalar Scalar; typedef typename conditional< is_same::value || is_same::value || is_same::value || is_same::value, int, typename conditional< is_same >::value || is_same >::value || is_same >::value || is_same >::value, std::complex, const Scalar& >::type >::type PrintType; Index width = 0; std::streamsize explicit_precision; if(fmt.precision == StreamPrecision) { explicit_precision = 0; } else if(fmt.precision == FullPrecision) { if (NumTraits::IsInteger) { explicit_precision = 0; } else { explicit_precision = significant_decimals_impl::run(); } } else { explicit_precision = fmt.precision; } std::streamsize old_precision = 0; if(explicit_precision) old_precision = 
s.precision(explicit_precision); bool align_cols = !(fmt.flags & DontAlignCols); if(align_cols) { // compute the largest width for(Index j = 0; j < m.cols(); ++j) for(Index i = 0; i < m.rows(); ++i) { std::stringstream sstr; sstr.copyfmt(s); sstr << static_cast(m.coeff(i,j)); width = std::max(width, Index(sstr.str().length())); } } std::streamsize old_width = s.width(); char old_fill_character = s.fill(); s << fmt.matPrefix; for(Index i = 0; i < m.rows(); ++i) { if (i) s << fmt.rowSpacer; s << fmt.rowPrefix; if(width) { s.fill(fmt.fill); s.width(width); } s << static_cast(m.coeff(i, 0)); for(Index j = 1; j < m.cols(); ++j) { s << fmt.coeffSeparator; if(width) { s.fill(fmt.fill); s.width(width); } s << static_cast(m.coeff(i, j)); } s << fmt.rowSuffix; if( i < m.rows() - 1) s << fmt.rowSeparator; } s << fmt.matSuffix; if(explicit_precision) s.precision(old_precision); if(width) { s.fill(old_fill_character); s.width(old_width); } return s; } } // end namespace internal /** \relates DenseBase * * Outputs the matrix, to the given stream. * * If you wish to print the matrix with a format different than the default, use DenseBase::format(). * * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters. * * \sa DenseBase::format() */ template std::ostream & operator << (std::ostream & s, const DenseBase & m) { return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); } } // end namespace Eigen #endif // EIGEN_IO_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/IndexedView.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2017 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INDEXED_VIEW_H #define EIGEN_INDEXED_VIEW_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { enum { RowsAtCompileTime = int(array_size::value), ColsAtCompileTime = int(array_size::value), MaxRowsAtCompileTime = RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : Dynamic, MaxColsAtCompileTime = ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : Dynamic, XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, RowIncr = int(get_compile_time_incr::value), ColIncr = int(get_compile_time_incr::value), InnerIncr = IsRowMajor ? ColIncr : RowIncr, OuterIncr = IsRowMajor ? RowIncr : ColIncr, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), XprInnerStride = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) : int(outer_stride_at_compile_time::ret), XprOuterstride = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time::ret) : int(inner_stride_at_compile_time::ret), InnerSize = XprTypeIsRowMajor ? ColsAtCompileTime : RowsAtCompileTime, IsBlockAlike = InnerIncr==1 && OuterIncr==1, IsInnerPannel = HasSameStorageOrderAsXprType && is_same,typename conditional::type>::value, InnerStrideAtCompileTime = InnerIncr<0 || InnerIncr==DynamicIndex || XprInnerStride==Dynamic ? Dynamic : XprInnerStride * InnerIncr, OuterStrideAtCompileTime = OuterIncr<0 || OuterIncr==DynamicIndex || XprOuterstride==Dynamic ? 
Dynamic : XprOuterstride * OuterIncr, ReturnAsScalar = is_same::value && is_same::value, ReturnAsBlock = (!ReturnAsScalar) && IsBlockAlike, ReturnAsIndexedView = (!ReturnAsScalar) && (!ReturnAsBlock), // FIXME we deal with compile-time strides if and only if we have DirectAccessBit flag, // but this is too strict regarding negative strides... DirectAccessMask = (int(InnerIncr)!=UndefinedIncr && int(OuterIncr)!=UndefinedIncr && InnerIncr>=0 && OuterIncr>=0) ? DirectAccessBit : 0, FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, Flags = (traits::Flags & (HereditaryBits | DirectAccessMask )) | FlagsLvalueBit | FlagsRowMajorBit | FlagsLinearAccessBit }; typedef Block BlockType; }; } template class IndexedViewImpl; /** \class IndexedView * \ingroup Core_Module * * \brief Expression of a non-sequential sub-matrix defined by arbitrary sequences of row and column indices * * \tparam XprType the type of the expression in which we are taking the intersections of sub-rows and sub-columns * \tparam RowIndices the type of the object defining the sequence of row indices * \tparam ColIndices the type of the object defining the sequence of column indices * * This class represents an expression of a sub-matrix (or sub-vector) defined as the intersection * of sub-sets of rows and columns, that are themself defined by generic sequences of row indices \f$ \{r_0,r_1,..r_{m-1}\} \f$ * and column indices \f$ \{c_0,c_1,..c_{n-1} \}\f$. Let \f$ A \f$ be the nested matrix, then the resulting matrix \f$ B \f$ has \c m * rows and \c n columns, and its entries are given by: \f$ B(i,j) = A(r_i,c_j) \f$. 
* * The \c RowIndices and \c ColIndices types must be compatible with the following API: * \code * operator[](Index) const; * Index size() const; * \endcode * * Typical supported types thus include: * - std::vector * - std::valarray * - std::array * - Plain C arrays: int[N] * - Eigen::ArrayXi * - decltype(ArrayXi::LinSpaced(...)) * - Any view/expressions of the previous types * - Eigen::ArithmeticSequence * - Eigen::internal::AllRange (helper for Eigen::placeholders::all) * - Eigen::internal::SingleRange (helper for single index) * - etc. * * In typical usages of %Eigen, this class should never be used directly. It is the return type of * DenseBase::operator()(const RowIndices&, const ColIndices&). * * \sa class Block */ template class IndexedView : public IndexedViewImpl::StorageKind> { public: typedef typename IndexedViewImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(IndexedView) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedView) typedef typename internal::ref_selector::non_const_type MatrixTypeNested; typedef typename internal::remove_all::type NestedExpression; template IndexedView(XprType& xpr, const T0& rowIndices, const T1& colIndices) : m_xpr(xpr), m_rowIndices(rowIndices), m_colIndices(colIndices) {} /** \returns number of rows */ Index rows() const { return internal::size(m_rowIndices); } /** \returns number of columns */ Index cols() const { return internal::size(m_colIndices); } /** \returns the nested expression */ const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ typename internal::remove_reference::type& nestedExpression() { return m_xpr; } /** \returns a const reference to the object storing/generating the row indices */ const RowIndices& rowIndices() const { return m_rowIndices; } /** \returns a const reference to the object storing/generating the column indices */ const ColIndices& colIndices() const { return m_colIndices; } protected: MatrixTypeNested m_xpr; 
RowIndices m_rowIndices; ColIndices m_colIndices; }; // Generic API dispatcher template class IndexedViewImpl : public internal::generic_xpr_base >::type { public: typedef typename internal::generic_xpr_base >::type Base; }; namespace internal { template struct unary_evaluator, IndexBased> : evaluator_base > { typedef IndexedView XprType; enum { CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of row/col index */, FlagsLinearAccessBit = (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1) ? LinearAccessBit : 0, FlagsRowMajorBit = traits::FlagsRowMajorBit, Flags = (evaluator::Flags & (HereditaryBits & ~RowMajorBit /*| LinearAccessBit | DirectAccessBit*/)) | FlagsLinearAccessBit | FlagsRowMajorBit, Alignment = 0 }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(XprType) Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; Index col = XprType::RowsAtCompileTime == 1 ? index : 0; return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; Index col = XprType::RowsAtCompileTime == 1 ? 
index : 0; return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const { Index row = XprType::RowsAtCompileTime == 1 ? 0 : index; Index col = XprType::RowsAtCompileTime == 1 ? index : 0; return m_argImpl.coeff( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]); } protected: evaluator m_argImpl; const XprType& m_xpr; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_INDEXED_VIEW_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/InternalHeaderCheck.h ================================================ #ifndef EIGEN_CORE_MODULE_H #error "Please include Eigen/Core instead of including headers inside the src directory directly." #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Inverse.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014-2019 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INVERSE_H #define EIGEN_INVERSE_H #include "./InternalHeaderCheck.h" namespace Eigen { template class InverseImpl; namespace internal { template struct traits > : traits { typedef typename XprType::PlainObject PlainObject; typedef traits BaseTraits; enum { Flags = BaseTraits::Flags & RowMajorBit }; }; } // end namespace internal /** \class Inverse * * \brief Expression of the inverse of another expression * * \tparam XprType the type of the expression we are taking the inverse * * This class represents an abstract expression of A.inverse() * and most of the time this is the only way it is used. 
* */ template class Inverse : public InverseImpl::StorageKind> { public: typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename internal::ref_selector::type XprTypeNested; typedef typename internal::remove_all::type XprTypeNestedCleaned; typedef typename internal::ref_selector::type Nested; typedef typename internal::remove_all::type NestedExpression; explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr) : m_xpr(xpr) {} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.cols(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.rows(); } EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; } protected: XprTypeNested m_xpr; }; // Generic API dispatcher template class InverseImpl : public internal::generic_xpr_base >::type { public: typedef typename internal::generic_xpr_base >::type Base; typedef typename XprType::Scalar Scalar; private: Scalar coeff(Index row, Index col) const; Scalar coeff(Index i) const; }; namespace internal { /** \internal * \brief Default evaluator for Inverse expression. * * This default evaluator for Inverse expression simply evaluate the inverse into a temporary * by a call to internal::call_assignment_no_alias. * Therefore, inverse implementers only have to specialize Assignment, ...> for * there own nested expression. 
* * \sa class Inverse */ template struct unary_evaluator > : public evaluator::PlainObject> { typedef Inverse InverseType; typedef typename InverseType::PlainObject PlainObject; typedef evaluator Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; unary_evaluator(const InverseType& inv_xpr) : m_result(inv_xpr.rows(), inv_xpr.cols()) { ::new (static_cast(this)) Base(m_result); internal::call_assignment_no_alias(m_result, inv_xpr); } protected: PlainObject m_result; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_INVERSE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Map.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAP_H #define EIGEN_MAP_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : public traits { typedef traits TraitsBase; enum { PlainObjectTypeInnerSize = ((traits::Flags&RowMajorBit)==RowMajorBit) ? PlainObjectType::ColsAtCompileTime : PlainObjectType::RowsAtCompileTime, InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? (InnerStrideAtCompileTime==Dynamic || PlainObjectTypeInnerSize==Dynamic ? Dynamic : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize)) : int(StrideType::OuterStrideAtCompileTime), Alignment = int(MapOptions)&int(AlignedMask), Flags0 = TraitsBase::Flags & (~NestByRefBit), Flags = is_lvalue::value ? 
int(Flags0) : (int(Flags0) & ~LvalueBit) }; private: enum { Options }; // Expressions don't have Options }; } /** \class Map * \ingroup Core_Module * * \brief A matrix or vector expression mapping an existing array of data. * * \tparam PlainObjectType the equivalent matrix type of the mapped data * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. * The default is \c #Unaligned. * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout * of an ordinary, contiguous array. This can be overridden by specifying strides. * The type passed here must be a specialization of the Stride template, see examples below. * * This class represents a matrix or vector expression mapping an existing array of data. * It can be used to let Eigen interface without any overhead with non-Eigen data structures, * such as plain C arrays or structures from other libraries. By default, it assumes that the * data is laid out contiguously in memory. You can however override this by explicitly specifying * inner and outer strides. * * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: * \include Map_simple.cpp * Output: \verbinclude Map_simple.out * * If you need to map non-contiguous arrays, you can do so by specifying strides: * * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time * fixed value. * \include Map_inner_stride.cpp * Output: \verbinclude Map_inner_stride.out * * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. * Here, we're specifying the outer stride as a runtime parameter. 
Note that here \c OuterStride<> is * a short version of \c OuterStride because the default template parameter of OuterStride * is \c Dynamic * \include Map_outer_stride.cpp * Output: \verbinclude Map_outer_stride.out * * For more details and for an example of specifying both an inner and an outer stride, see class Stride. * * \b Tip: to change the array of data mapped by a Map object, you can use the C++ * placement new syntax: * * Example: \include Map_placement_new.cpp * Output: \verbinclude Map_placement_new.out * * This class is the return type of PlainObjectBase::Map() but can also be used directly. * * \sa PlainObjectBase::Map(), \ref TopicStorageOrders */ template class Map : public MapBase > { public: typedef MapBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Map) typedef typename Base::PointerType PointerType; typedef PointerType PointerArgType; EIGEN_DEVICE_FUNC inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() : internal::traits::OuterStrideAtCompileTime != Dynamic ? Index(internal::traits::OuterStrideAtCompileTime) : IsVectorAtCompileTime ? (this->size() * innerStride()) : int(Flags)&RowMajorBit ? (this->cols() * innerStride()) : (this->rows() * innerStride()); } /** Constructor in the fixed-size case. * * \param dataPtr pointer to the array to map * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr)), m_stride(stride) { } /** Constructor in the dynamic-size vector case. 
* * \param dataPtr pointer to the array to map * \param size the size of the vector expression * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) { } /** Constructor in the dynamic-size matrix case. * * \param dataPtr pointer to the array to map * \param rows the number of rows of the matrix expression * \param cols the number of columns of the matrix expression * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride) { } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) protected: StrideType m_stride; }; } // end namespace Eigen #endif // EIGEN_MAP_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/MapBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAPBASE_H #define EIGEN_MAPBASE_H #define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ EIGEN_STATIC_ASSERT((int(internal::evaluator::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) #include "./InternalHeaderCheck.h" namespace Eigen { /** \ingroup Core_Module * * \brief Base class for dense Map and Block expression with direct access * * This base class provides the const low-level accessors (e.g. 
coeff, coeffRef) of dense * Map and Block objects with direct access. * Typical users do not have to directly deal with this class. * * This class can be extended by through the macro plugin \c EIGEN_MAPBASE_PLUGIN. * See \link TopicCustomizing_Plugins customizing Eigen \endlink for details. * * The \c Derived class has to provide the following two methods describing the memory layout: * \code Index innerStride() const; \endcode * \code Index outerStride() const; \endcode * * \sa class Map, class Block */ template class MapBase : public internal::dense_xpr_base::type { public: typedef typename internal::dense_xpr_base::type Base; enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, ColsAtCompileTime = internal::traits::ColsAtCompileTime, InnerStrideAtCompileTime = internal::traits::InnerStrideAtCompileTime, SizeAtCompileTime = Base::SizeAtCompileTime }; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::conditional< bool(internal::is_lvalue::value), Scalar *, const Scalar *>::type PointerType; using Base::derived; // using Base::RowsAtCompileTime; // using Base::ColsAtCompileTime; // using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::IsRowMajor; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::eval; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; // bug 217 - compile error on ICC 11.1 using Base::operator=; typedef typename Base::CoeffReturnType CoeffReturnType; /** \copydoc DenseBase::rows() */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_rows.value(); 
} /** \copydoc DenseBase::cols() */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_cols.value(); } /** Returns a pointer to the first coefficient of the matrix or vector. * * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride(). * * \sa innerStride(), outerStride() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_data; } /** \copydoc PlainObjectBase::coeff(Index,Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index rowId, Index colId) const { return m_data[colId * colStride() + rowId * rowStride()]; } /** \copydoc PlainObjectBase::coeff(Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return m_data[index * innerStride()]; } /** \copydoc PlainObjectBase::coeffRef(Index,Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return this->m_data[colId * colStride() + rowId * rowStride()]; } /** \copydoc PlainObjectBase::coeffRef(Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return this->m_data[index * innerStride()]; } /** \internal */ template inline PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt (m_data + (colId * colStride() + rowId * rowStride())); } /** \internal */ template inline PacketScalar packet(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return internal::ploadt(m_data + index * innerStride()); } /** \internal Constructor for fixed size matrices or vectors */ EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) checkSanity(); } /** \internal Constructor for dynamically sized vectors */ EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) 
: m_data(dataPtr), m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)), m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) eigen_assert(vecSize >= 0); eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize); checkSanity(); } /** \internal Constructor for dynamically sized matrices */ EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : m_data(dataPtr), m_rows(rows), m_cols(cols) { eigen_assert( (dataPtr == 0) || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); checkSanity(); } #ifdef EIGEN_MAPBASE_PLUGIN #include EIGEN_MAPBASE_PLUGIN #endif protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) template EIGEN_DEVICE_FUNC void checkSanity(typename internal::enable_if<(internal::traits::Alignment>0),void*>::type = 0) const { #if EIGEN_MAX_ALIGN_BYTES>0 // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value: const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime); EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride); eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits::Alignment) == 0) || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits::Alignment ) && "data is not aligned"); #endif } template EIGEN_DEVICE_FUNC void checkSanity(typename internal::enable_if::Alignment==0,void*>::type = 0) const {} PointerType m_data; const internal::variable_if_dynamic m_rows; const internal::variable_if_dynamic m_cols; }; /** \ingroup Core_Module * * \brief Base class for non-const dense Map and Block expression with direct access * * This base class provides the non-const low-level accessors (e.g. 
coeff and coeffRef) of * dense Map and Block objects with direct access. * It inherits MapBase which defines the const variant for reading specific entries. * * \sa class Map, class Block */ template class MapBase : public MapBase { typedef MapBase ReadOnlyMapBase; public: typedef MapBase Base; typedef typename Base::Scalar Scalar; typedef typename Base::PacketScalar PacketScalar; typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PointerType PointerType; using Base::derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename internal::conditional< internal::is_lvalue::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; EIGEN_DEVICE_FUNC inline const Scalar* data() const { return this->m_data; } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) { return this->m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index index) { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return this->m_data[index * innerStride()]; } template inline void writePacket(Index row, Index col, const PacketScalar& val) { internal::pstoret (this->m_data + (col * colStride() + row * rowStride()), val); } template inline void writePacket(Index index, const PacketScalar& val) { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) internal::pstoret (this->m_data + index * innerStride(), val); } EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {} EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {} EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, 
rows, cols) {} EIGEN_DEVICE_FUNC Derived& operator=(const MapBase& other) { ReadOnlyMapBase::Base::operator=(other); return derived(); } // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base, // see bugs 821 and 920. using ReadOnlyMapBase::Base::operator=; protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) }; #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS } // end namespace Eigen #endif // EIGEN_MAPBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/MathFunctions.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H // TODO this should better be moved to NumTraits // Source: WolframAlpha #define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L #define EIGEN_LOG2E 1.442695040888963407359924681001892137426645954152985934135449406931109219L #define EIGEN_LN2 0.693147180559945309417232121458176568075500134360255254120680009493393621L #include "./InternalHeaderCheck.h" namespace Eigen { // On WINCE, std::abs is defined for int only, so let's defined our own overloads: // This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too. 
#if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500 long abs(long x) { return (labs(x)); } double abs(double x) { return (fabs(x)); } float abs(float x) { return (fabsf(x)); } long double abs(long double x) { return (fabsl(x)); } #endif namespace internal { /** \internal \class global_math_functions_filtering_base * * What it does: * Defines a typedef 'type' as follows: * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then * global_math_functions_filtering_base::type is a typedef for it. * - otherwise, global_math_functions_filtering_base::type is a typedef for T. * * How it's used: * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial specialization * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. * * How it's implemented: * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace * the typename dummy by an integer template parameter, it doesn't work anymore! 
*/ template struct global_math_functions_filtering_base { typedef T type; }; template struct always_void { typedef void type; }; template struct global_math_functions_filtering_base ::type > { typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; }; #define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl::type> #define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval::type>::type /**************************************************************************** * Implementation of real * ****************************************************************************/ template::IsComplex> struct real_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x; } }; template struct real_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { using std::real; return real(x); } }; template struct real_impl : real_default_impl {}; #if defined(EIGEN_GPU_COMPILE_PHASE) template struct real_impl > { typedef T RealScalar; EIGEN_DEVICE_FUNC static inline T run(const std::complex& x) { return x.real(); } }; #endif template struct real_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of imag * ****************************************************************************/ template::IsComplex> struct imag_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar&) { return RealScalar(0); } }; template struct imag_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { using std::imag; return imag(x); } }; template struct imag_impl : imag_default_impl {}; #if defined(EIGEN_GPU_COMPILE_PHASE) template struct imag_impl > { typedef T RealScalar; 
EIGEN_DEVICE_FUNC static inline T run(const std::complex& x) { return x.imag(); } }; #endif template struct imag_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of real_ref * ****************************************************************************/ template struct real_ref_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[0]; } EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[0]; } }; template struct real_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of imag_ref * ****************************************************************************/ template struct imag_ref_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[1]; } EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[1]; } }; template struct imag_ref_default_impl { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Scalar run(Scalar&) { return Scalar(0); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline const Scalar run(const Scalar&) { return Scalar(0); } }; template struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; template struct imag_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of conj * ****************************************************************************/ template::IsComplex> struct conj_default_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { return x; } }; template struct conj_default_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { using std::conj; 
return conj(x); } }; template::IsComplex> struct conj_impl : conj_default_impl {}; template struct conj_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of abs2 * ****************************************************************************/ template struct abs2_impl_default { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x*x; } }; template struct abs2_impl_default // IsComplex { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x.real()*x.real() + x.imag()*x.imag(); } }; template struct abs2_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return abs2_impl_default::IsComplex>::run(x); } }; template struct abs2_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of sqrt/rsqrt * ****************************************************************************/ template struct sqrt_impl { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE Scalar run(const Scalar& x) { EIGEN_USING_STD(sqrt); return sqrt(x); } }; // Complex sqrt defined in MathFunctionsImpl.h. template EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& a_x); // Custom implementation is faster than `std::sqrt`, works on // GPU, and correctly handles special cases (unlike MSVC). template struct sqrt_impl > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) { return complex_sqrt(x); } }; template struct sqrt_retval { typedef Scalar type; }; // Default implementation relies on numext::sqrt, at bottom of file. template struct rsqrt_impl; // Complex rsqrt defined in MathFunctionsImpl.h. 
template EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& a_x); template struct rsqrt_impl > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE std::complex run(const std::complex& x) { return complex_rsqrt(x); } }; template struct rsqrt_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of norm1 * ****************************************************************************/ template struct norm1_default_impl; template struct norm1_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { EIGEN_USING_STD(abs); return abs(x.real()) + abs(x.imag()); } }; template struct norm1_default_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { EIGEN_USING_STD(abs); return abs(x); } }; template struct norm1_impl : norm1_default_impl::IsComplex> {}; template struct norm1_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of hypot * ****************************************************************************/ template struct hypot_impl; template struct hypot_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of cast * ****************************************************************************/ template struct cast_impl { EIGEN_DEVICE_FUNC static inline NewType run(const OldType& x) { return static_cast(x); } }; // Casting from S -> Complex leads to an implicit conversion from S to T, // generating warnings on clang. Here we explicitly cast the real component. 
template struct cast_impl::IsComplex && NumTraits::IsComplex >::type> { EIGEN_DEVICE_FUNC static inline NewType run(const OldType& x) { typedef typename NumTraits::Real NewReal; return static_cast(static_cast(x)); } }; // here, for once, we're plainly returning NewType: we don't want cast to do weird things. template EIGEN_DEVICE_FUNC inline NewType cast(const OldType& x) { return cast_impl::run(x); } /**************************************************************************** * Implementation of round * ****************************************************************************/ template struct round_impl { EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { #if EIGEN_HAS_CXX11_MATH EIGEN_USING_STD(round); #endif return Scalar(round(x)); } }; #if !EIGEN_HAS_CXX11_MATH #if EIGEN_HAS_C99_MATH // Use ::roundf for float. template<> struct round_impl { EIGEN_DEVICE_FUNC static inline float run(const float& x) { return ::roundf(x); } }; #else template struct round_using_floor_ceil_impl { EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { // Without C99 round/roundf, resort to floor/ceil. EIGEN_USING_STD(floor); EIGEN_USING_STD(ceil); // If not enough precision to resolve a decimal at all, return the input. // Otherwise, adding 0.5 can trigger an increment by 1. const Scalar limit = Scalar(1ull << (NumTraits::digits() - 1)); if (x >= limit || x <= -limit) { return x; } return (x > Scalar(0)) ? 
Scalar(floor(x + Scalar(0.5))) : Scalar(ceil(x - Scalar(0.5))); } }; template<> struct round_impl : round_using_floor_ceil_impl {}; template<> struct round_impl : round_using_floor_ceil_impl {}; #endif // EIGEN_HAS_C99_MATH #endif // !EIGEN_HAS_CXX11_MATH template struct round_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of rint * ****************************************************************************/ template struct rint_impl { EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { #if EIGEN_HAS_CXX11_MATH EIGEN_USING_STD(rint); #endif return rint(x); } }; #if !EIGEN_HAS_CXX11_MATH template<> struct rint_impl { EIGEN_DEVICE_FUNC static inline double run(const double& x) { return ::rint(x); } }; template<> struct rint_impl { EIGEN_DEVICE_FUNC static inline float run(const float& x) { return ::rintf(x); } }; #endif template struct rint_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of arg * ****************************************************************************/ // Visual Studio 2017 has a bug where arg(float) returns 0 for negative inputs. // This seems to be fixed in VS 2019. 
#if EIGEN_HAS_CXX11_MATH && (!EIGEN_COMP_MSVC || EIGEN_COMP_MSVC >= 1920) // std::arg is only defined for types of std::complex, or integer types or float/double/long double template::IsComplex || is_integral::value || is_same::value || is_same::value || is_same::value > struct arg_default_impl; template struct arg_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { #if defined(EIGEN_HIP_DEVICE_COMPILE) // HIP does not seem to have a native device side implementation for the math routine "arg" using std::arg; #else EIGEN_USING_STD(arg); #endif return static_cast(arg(x)); } }; // Must be non-complex floating-point type (e.g. half/bfloat16). template struct arg_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return (x < Scalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0); } }; #else template::IsComplex> struct arg_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return (x < RealScalar(0)) ? RealScalar(EIGEN_PI) : RealScalar(0); } }; template struct arg_default_impl { typedef typename NumTraits::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { EIGEN_USING_STD(arg); return arg(x); } }; #endif template struct arg_impl : arg_default_impl {}; template struct arg_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of expm1 * ****************************************************************************/ // This implementation is based on GSL Math's expm1. namespace std_fallback { // fallback expm1 implementation in case there is no expm1(Scalar) function in namespace of Scalar, // or that there is no suitable std::expm1 function available. Implementation // attributed to Kahan. See: http://www.plunk.org/~hatch/rightway.php. 
template EIGEN_DEVICE_FUNC inline Scalar expm1(const Scalar& x) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) typedef typename NumTraits::Real RealScalar; EIGEN_USING_STD(exp); Scalar u = exp(x); if (numext::equal_strict(u, Scalar(1))) { return x; } Scalar um1 = u - RealScalar(1); if (numext::equal_strict(um1, Scalar(-1))) { return RealScalar(-1); } EIGEN_USING_STD(log); Scalar logu = log(u); return numext::equal_strict(u, logu) ? u : (u - RealScalar(1)) * x / logu; } } template struct expm1_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) #if EIGEN_HAS_CXX11_MATH using std::expm1; #else using std_fallback::expm1; #endif return expm1(x); } }; template struct expm1_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of log * ****************************************************************************/ // Complex log defined in MathFunctionsImpl.h. template EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z); template struct log_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { EIGEN_USING_STD(log); return static_cast(log(x)); } }; template struct log_impl > { EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& z) { return complex_log(z); } }; /**************************************************************************** * Implementation of log1p * ****************************************************************************/ namespace std_fallback { // fallback log1p implementation in case there is no log1p(Scalar) function in namespace of Scalar, // or that there is no suitable std::log1p function available template EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) typedef typename NumTraits::Real RealScalar; EIGEN_USING_STD(log); Scalar x1p = RealScalar(1) + x; Scalar log_1p = log_impl::run(x1p); const bool is_small = 
numext::equal_strict(x1p, Scalar(1)); const bool is_inf = numext::equal_strict(x1p, log_1p); return (is_small || is_inf) ? x : x * (log_1p / (x1p - RealScalar(1))); } } template struct log1p_impl { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { #if EIGEN_HAS_CXX11_MATH using std::log1p; #else using std_fallback::log1p; #endif return log1p(x); } }; // Specialization for complex types that are not supported by std::log1p. template struct log1p_impl > { EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) EIGEN_DEVICE_FUNC static inline std::complex run( const std::complex& x) { return std_fallback::log1p(x); } }; template struct log1p_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of pow * ****************************************************************************/ template::IsInteger&&NumTraits::IsInteger> struct pow_impl { //typedef Scalar retval; typedef typename ScalarBinaryOpTraits >::ReturnType result_type; static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y) { EIGEN_USING_STD(pow); return pow(x, y); } }; template struct pow_impl { typedef ScalarX result_type; static EIGEN_DEVICE_FUNC inline ScalarX run(ScalarX x, ScalarY y) { ScalarX res(1); eigen_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; while(y) { x *= x; if(y&1) res *= x; y >>= 1; } return res; } }; /**************************************************************************** * Implementation of random * ****************************************************************************/ template struct random_default_impl {}; template struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; template struct random_retval { typedef Scalar type; }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); 
template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); } static inline Scalar run() { return run(Scalar(NumTraits::IsSigned ? -1 : 0), Scalar(1)); } }; enum { meta_floor_log2_terminate, meta_floor_log2_move_up, meta_floor_log2_move_down, meta_floor_log2_bogus }; template struct meta_floor_log2_selector { enum { middle = (lower + upper) / 2, value = (upper <= lower + 1) ? int(meta_floor_log2_terminate) : (n < (1 << middle)) ? int(meta_floor_log2_move_down) : (n==0) ? int(meta_floor_log2_bogus) : int(meta_floor_log2_move_up) }; }; template::value> struct meta_floor_log2 {}; template struct meta_floor_log2 { enum { value = meta_floor_log2::middle>::value }; }; template struct meta_floor_log2 { enum { value = meta_floor_log2::middle, upper>::value }; }; template struct meta_floor_log2 { enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower }; }; template struct meta_floor_log2 { // no value, error at compile time }; template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { if (y <= x) return x; // ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself. typedef typename make_unsigned::type ScalarU; // ScalarX is the widest of ScalarU and unsigned int. // We'll deal only with ScalarX and unsigned int below thus avoiding signed // types and arithmetic and signed overflows (which are undefined behavior). typedef typename conditional<(ScalarU(-1) > unsigned(-1)), ScalarU, unsigned>::type ScalarX; // The following difference doesn't overflow, provided our integer types are two's // complement and have the same number of padding bits in signed and unsigned variants. // This is the case in most modern implementations of C++. 
ScalarX range = ScalarX(y) - ScalarX(x); ScalarX offset = 0; ScalarX divisor = 1; ScalarX multiplier = 1; const unsigned rand_max = RAND_MAX; if (range <= rand_max) divisor = (rand_max + 1) / (range + 1); else multiplier = 1 + range / (rand_max + 1); // Rejection sampling. do { offset = (unsigned(std::rand()) * multiplier) / divisor; } while (offset > range); return Scalar(ScalarX(x) + offset); } static inline Scalar run() { #ifdef EIGEN_MAKING_DOCS return run(Scalar(NumTraits::IsSigned ? -10 : 0), Scalar(10)); #else enum { rand_bits = meta_floor_log2<(unsigned int)(RAND_MAX)+1>::value, scalar_bits = sizeof(Scalar) * CHAR_BIT, shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)), offset = NumTraits::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0 }; return Scalar((std::rand() >> shift) - offset); #endif } }; template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return Scalar(random(x.real(), y.real()), random(x.imag(), y.imag())); } static inline Scalar run() { typedef typename NumTraits::Real RealScalar; return Scalar(random(), random()); } }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); } // Implementation of is* functions // std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang. 
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG) #define EIGEN_USE_STD_FPCLASSIFY 1 #else #define EIGEN_USE_STD_FPCLASSIFY 0 #endif template EIGEN_DEVICE_FUNC typename internal::enable_if::value,bool>::type isnan_impl(const T&) { return false; } template EIGEN_DEVICE_FUNC typename internal::enable_if::value,bool>::type isinf_impl(const T&) { return false; } template EIGEN_DEVICE_FUNC typename internal::enable_if::value,bool>::type isfinite_impl(const T&) { return true; } template EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type isfinite_impl(const T& x) { #if defined(EIGEN_GPU_COMPILE_PHASE) return (::isfinite)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isfinite; return isfinite EIGEN_NOT_A_MACRO (x); #else return x<=NumTraits::highest() && x>=NumTraits::lowest(); #endif } template EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type isinf_impl(const T& x) { #if defined(EIGEN_GPU_COMPILE_PHASE) return (::isinf)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isinf; return isinf EIGEN_NOT_A_MACRO (x); #else return x>NumTraits::highest() || x::lowest(); #endif } template EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral::value)&&(!NumTraits::IsComplex),bool>::type isnan_impl(const T& x) { #if defined(EIGEN_GPU_COMPILE_PHASE) return (::isnan)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isnan; return isnan EIGEN_NOT_A_MACRO (x); #else return x != x; #endif } #if (!EIGEN_USE_STD_FPCLASSIFY) #if EIGEN_COMP_MSVC template EIGEN_DEVICE_FUNC bool isinf_msvc_helper(T x) { return _fpclass(x)==_FPCLASS_NINF || _fpclass(x)==_FPCLASS_PINF; } //MSVC defines a _isnan builtin function, but for double only EIGEN_DEVICE_FUNC inline bool isnan_impl(const long double& x) { return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isnan_impl(const double& x) { 
return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isnan_impl(const float& x) { return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isinf_impl(const long double& x) { return isinf_msvc_helper(x); } EIGEN_DEVICE_FUNC inline bool isinf_impl(const double& x) { return isinf_msvc_helper(x); } EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_msvc_helper(x); } #elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC) #if EIGEN_GNUC_AT_LEAST(5,0) #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize("no-finite-math-only"))) #else // NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol), // while the second prevent too aggressive optimizations in fast-math mode: #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((noinline,optimize("no-finite-math-only"))) #endif template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const long double& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const double& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const float& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const double& x) { return __builtin_isinf(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const float& x) { return __builtin_isinf(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const long double& x) { return __builtin_isinf(x); } #undef EIGEN_TMP_NOOPT_ATTRIB #endif #endif // The following overload are defined at the end of this file template EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x); template EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x); template EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x); template T generic_fast_tanh_float(const T& a_x); } // end namespace internal /**************************************************************************** * Generic 
math functions * ****************************************************************************/ namespace numext { #if (!defined(EIGEN_GPUCC) || defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)) template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { EIGEN_USING_STD(min) return min EIGEN_NOT_A_MACRO (x,y); } template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { EIGEN_USING_STD(max) return max EIGEN_NOT_A_MACRO (x,y); } #else template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { return y < x ? y : x; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y) { return fminf(x, y); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y) { return fmin(x, y); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y) { #if defined(EIGEN_HIPCC) // no "fminl" on HIP yet return (x < y) ? x : y; #else return fminl(x, y); #endif } template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { return x < y ? y : x; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y) { return fmaxf(x, y); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y) { return fmax(x, y); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y) { #if defined(EIGEN_HIPCC) // no "fmaxl" on HIP yet return (x > y) ? 
x : y; #else return fmaxl(x, y); #endif } #endif #if defined(SYCL_DEVICE_ONLY) #define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_long) #define SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_char) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_short) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_int) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_long) #define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) #define SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uchar) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ushort) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_uint) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_ulong) #define SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(NAME, FUNC) \ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) \ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY(NAME, FUNC) #define SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(NAME, FUNC) \ SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) \ SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY(NAME, FUNC) #define SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(NAME, FUNC) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC,cl::sycl::cl_double) #define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(NAME, FUNC) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, cl::sycl::cl_float) \ SYCL_SPECIALIZE_UNARY_FUNC(NAME, 
FUNC,cl::sycl::cl_double) #define SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(NAME, FUNC, RET_TYPE) \ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_float) \ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, cl::sycl::cl_double) #define SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ template<> \ EIGEN_DEVICE_FUNC \ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE& x) { \ return cl::sycl::FUNC(x); \ } #define SYCL_SPECIALIZE_UNARY_FUNC(NAME, FUNC, TYPE) \ SYCL_SPECIALIZE_GEN_UNARY_FUNC(NAME, FUNC, TYPE, TYPE) #define SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \ template<> \ EIGEN_DEVICE_FUNC \ EIGEN_ALWAYS_INLINE RET_TYPE NAME(const ARG_TYPE1& x, const ARG_TYPE2& y) { \ return cl::sycl::FUNC(x, y); \ } #define SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE) \ SYCL_SPECIALIZE_GEN1_BINARY_FUNC(NAME, FUNC, RET_TYPE, ARG_TYPE, ARG_TYPE) #define SYCL_SPECIALIZE_BINARY_FUNC(NAME, FUNC, TYPE) \ SYCL_SPECIALIZE_GEN2_BINARY_FUNC(NAME, FUNC, TYPE, TYPE) SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(mini, min) SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(mini, fmin) SYCL_SPECIALIZE_INTEGER_TYPES_BINARY(maxi, max) SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(maxi, fmax) #endif template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) { return internal::real_ref_impl::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(arg, Scalar) arg(const Scalar& x) { return 
EIGEN_MATHFUNC_IMPL(arg, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) { return internal::imag_ref_impl::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); } EIGEN_DEVICE_FUNC inline bool abs2(bool x) { return x; } template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T absdiff(const T& x, const T& y) { return x > y ? x - y : y - x; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float absdiff(const float& x, const float& y) { return fabsf(x - y); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double absdiff(const double& x, const double& y) { return fabs(x - y); } #if !defined(EIGEN_GPUCC) // HIP and CUDA do not support long double. 
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) { return fabsl(x - y); } #endif template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(hypot, hypot) #endif template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log1p, log1p) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log1p(const float &x) { return ::log1pf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log1p(const double &x) { return ::log1p(x); } #endif template EIGEN_DEVICE_FUNC inline typename internal::pow_impl::result_type pow(const ScalarX& x, const ScalarY& y) { return internal::pow_impl::run(x, y); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(pow, pow) #endif template EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); } template EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); } template EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isnan, isnan, bool) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isinf, isinf, bool) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE(isfinite, isfinite, bool) #endif template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(rint, Scalar) rint(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(rint, Scalar)::run(x); } template EIGEN_DEVICE_FUNC inline 
EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(round, round) #endif template EIGEN_DEVICE_FUNC T (floor)(const T& x) { EIGEN_USING_STD(floor) return floor(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(floor, floor) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float floor(const float &x) { return ::floorf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double floor(const double &x) { return ::floor(x); } #endif template EIGEN_DEVICE_FUNC T (ceil)(const T& x) { EIGEN_USING_STD(ceil); return ceil(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(ceil, ceil) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float ceil(const float &x) { return ::ceilf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double ceil(const double &x) { return ::ceil(x); } #endif /** Log base 2 for 32 bits positive integers. * Conveniently returns 0 for x==0. */ inline int log2(int x) { eigen_assert(x>=0); unsigned int v(x); static const int table[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return table[(v * 0x07C4ACDDU) >> 27]; } /** \returns the square root of \a x. * * It is essentially equivalent to * \code using std::sqrt; return sqrt(x); \endcode * but slightly faster for float/double and some compilers (e.g., gcc), thanks to * specializations when SSE is enabled. * * It's usage is justified in performance critical functions, like norm/normalize. 
*/ template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); } // Boolean specialization, avoids implicit float to bool conversion (-Wimplicit-conversion-floating-point-to-bool). template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC bool sqrt(const bool &x) { return x; } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sqrt, sqrt) #endif /** \returns the reciprocal square root of \a x. **/ template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T rsqrt(const T& x) { return internal::rsqrt_impl::run(x); } template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T log(const T &x) { return internal::log_impl::run(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(log, log) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log(const float &x) { return ::logf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log(const double &x) { return ::log(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if::IsSigned || NumTraits::IsComplex,typename NumTraits::Real>::type abs(const T &x) { EIGEN_USING_STD(abs); return abs(x); } template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if::IsSigned || NumTraits::IsComplex),typename NumTraits::Real>::type abs(const T &x) { return x; } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_INTEGER_TYPES_UNARY(abs, abs) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(abs, fabs) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const float &x) { return ::fabsf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const double &x) { return ::fabs(x); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const std::complex& x) { return ::hypotf(x.real(), x.imag()); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const std::complex& x) { 
return ::hypot(x.real(), x.imag()); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T exp(const T &x) { EIGEN_USING_STD(exp); return exp(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(exp, exp) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float exp(const float &x) { return ::expf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double exp(const double &x) { return ::exp(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp(const std::complex& x) { float com = ::expf(x.real()); float res_real = com * ::cosf(x.imag()); float res_imag = com * ::sinf(x.imag()); return std::complex(res_real, res_imag); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::complex exp(const std::complex& x) { double com = ::exp(x.real()); double res_real = com * ::cos(x.imag()); double res_imag = com * ::sin(x.imag()); return std::complex(res_real, res_imag); } #endif template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(expm1, Scalar)::run(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(expm1, expm1) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float expm1(const float &x) { return ::expm1f(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double expm1(const double &x) { return ::expm1(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cos(const T &x) { EIGEN_USING_STD(cos); return cos(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cos,cos) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cos(const float &x) { return ::cosf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cos(const double &x) { return ::cos(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sin(const T &x) { EIGEN_USING_STD(sin); return sin(x); } #if defined(SYCL_DEVICE_ONLY) 
SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sin, sin) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sin(const float &x) { return ::sinf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sin(const double &x) { return ::sin(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tan(const T &x) { EIGEN_USING_STD(tan); return tan(x); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tan, tan) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tan(const float &x) { return ::tanf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tan(const double &x) { return ::tan(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T acos(const T &x) { EIGEN_USING_STD(acos); return acos(x); } #if EIGEN_HAS_CXX11_MATH template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T acosh(const T &x) { EIGEN_USING_STD(acosh); return static_cast(acosh(x)); } #endif #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acos, acos) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(acosh, acosh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float acos(const float &x) { return ::acosf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double acos(const double &x) { return ::acos(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T asin(const T &x) { EIGEN_USING_STD(asin); return asin(x); } #if EIGEN_HAS_CXX11_MATH template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T asinh(const T &x) { EIGEN_USING_STD(asinh); return static_cast(asinh(x)); } #endif #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asin, asin) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(asinh, asinh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float asin(const float &x) { return ::asinf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double asin(const double &x) { return ::asin(x); } #endif template EIGEN_DEVICE_FUNC 
EIGEN_ALWAYS_INLINE T atan(const T &x) { EIGEN_USING_STD(atan); return static_cast(atan(x)); } #if EIGEN_HAS_CXX11_MATH template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T atanh(const T &x) { EIGEN_USING_STD(atanh); return static_cast(atanh(x)); } #endif #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atan, atan) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(atanh, atanh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float atan(const float &x) { return ::atanf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double atan(const double &x) { return ::atan(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cosh(const T &x) { EIGEN_USING_STD(cosh); return static_cast(cosh(x)); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(cosh, cosh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cosh(const float &x) { return ::coshf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cosh(const double &x) { return ::cosh(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sinh(const T &x) { EIGEN_USING_STD(sinh); return static_cast(sinh(x)); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(sinh, sinh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sinh(const float &x) { return ::sinhf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sinh(const double &x) { return ::sinh(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tanh(const T &x) { EIGEN_USING_STD(tanh); return tanh(x); } #if (!defined(EIGEN_GPUCC)) && EIGEN_FAST_MATH && !defined(SYCL_DEVICE_ONLY) EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(float x) { return internal::generic_fast_tanh_float(x); } #endif #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_UNARY(tanh, tanh) #endif #if defined(EIGEN_GPUCC) template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(const float &x) { return 
::tanhf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tanh(const double &x) { return ::tanh(x); } #endif template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T fmod(const T& a, const T& b) { EIGEN_USING_STD(fmod); return fmod(a, b); } #if defined(SYCL_DEVICE_ONLY) SYCL_SPECIALIZE_FLOATING_TYPES_BINARY(fmod, fmod) #endif #if defined(EIGEN_GPUCC) template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float fmod(const float& a, const float& b) { return ::fmodf(a, b); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double fmod(const double& a, const double& b) { return ::fmod(a, b); } #endif #if defined(SYCL_DEVICE_ONLY) #undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_BINARY #undef SYCL_SPECIALIZE_SIGNED_INTEGER_TYPES_UNARY #undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_BINARY #undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY #undef SYCL_SPECIALIZE_INTEGER_TYPES_BINARY #undef SYCL_SPECIALIZE_UNSIGNED_INTEGER_TYPES_UNARY #undef SYCL_SPECIALIZE_FLOATING_TYPES_BINARY #undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY #undef SYCL_SPECIALIZE_FLOATING_TYPES_UNARY_FUNC_RET_TYPE #undef SYCL_SPECIALIZE_GEN_UNARY_FUNC #undef SYCL_SPECIALIZE_UNARY_FUNC #undef SYCL_SPECIALIZE_GEN1_BINARY_FUNC #undef SYCL_SPECIALIZE_GEN2_BINARY_FUNC #undef SYCL_SPECIALIZE_BINARY_FUNC #endif } // end namespace numext namespace internal { template EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex& x) { return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x)); } template EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex& x) { return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x)); } template EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex& x) { return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x)); } /**************************************************************************** * Implementation of fuzzy comparisons * ****************************************************************************/ 
template struct scalar_fuzzy_default_impl {}; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs(x) <= numext::abs(y) * prec; } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { return numext::abs(x - y) <= numext::mini(numext::abs(x), numext::abs(y)) * prec; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { return x <= y || isApprox(x, y, prec); } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) { return x == Scalar(0); } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) { return x == y; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) { return x <= y; } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs2(x) <= numext::abs2(y) * prec * prec; } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { return numext::abs2(x - y) <= numext::mini(numext::abs2(x), numext::abs2(y)) * prec * prec; } }; template struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; template EIGEN_DEVICE_FUNC inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); } template EIGEN_DEVICE_FUNC 
inline bool isApprox(const Scalar& x, const Scalar& y, const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApprox(x, y, precision); } template EIGEN_DEVICE_FUNC inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); } /****************************************** *** The special case of the bool type *** ******************************************/ template<> struct random_impl { static inline bool run() { return random(0,1)==0 ? false : true; } static inline bool run(const bool& a, const bool& b) { return random(a, b)==0 ? false : true; } }; template<> struct scalar_fuzzy_impl { typedef bool RealScalar; template EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { return !x; } EIGEN_DEVICE_FUNC static inline bool isApprox(bool x, bool y, bool) { return x == y; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) { return (!x) || y; } }; } // end namespace internal // Default implementations that rely on other numext implementations namespace internal { // Specialization for complex types that are not supported by std::expm1. template struct expm1_impl > { EIGEN_STATIC_ASSERT_NON_INTEGER(RealScalar) EIGEN_DEVICE_FUNC static inline std::complex run( const std::complex& x) { RealScalar xr = x.real(); RealScalar xi = x.imag(); // expm1(z) = exp(z) - 1 // = exp(x + i * y) - 1 // = exp(x) * (cos(y) + i * sin(y)) - 1 // = exp(x) * cos(y) - 1 + i * exp(x) * sin(y) // Imag(expm1(z)) = exp(x) * sin(y) // Real(expm1(z)) = exp(x) * cos(y) - 1 // = exp(x) * cos(y) - 1. 
// = expm1(x) + exp(x) * (cos(y) - 1) // = expm1(x) + exp(x) * (2 * sin(y / 2) ** 2) RealScalar erm1 = numext::expm1(xr); RealScalar er = erm1 + RealScalar(1.); RealScalar sin2 = numext::sin(xi / RealScalar(2.)); sin2 = sin2 * sin2; RealScalar s = numext::sin(xi); RealScalar real_part = erm1 - RealScalar(2.) * er * sin2; return std::complex(real_part, er * s); } }; template struct rsqrt_impl { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE T run(const T& x) { return T(1)/numext::sqrt(x); } }; #if defined(EIGEN_GPU_COMPILE_PHASE) template struct conj_impl, true> { EIGEN_DEVICE_FUNC static inline std::complex run(const std::complex& x) { return std::complex(numext::real(x), -numext::imag(x)); } }; #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/MathFunctionsImpl.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONSIMPL_H #define EIGEN_MATHFUNCTIONSIMPL_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { /** \internal \returns the hyperbolic tan of \a a (coeff-wise) Doesn't do anything fancy, just a 13/6-degree rational interpolant which is accurate up to a couple of ulps in the (approximate) range [-8, 8], outside of which tanh(x) = +/-1 in single precision. The input is clamped to the range [-c, c]. The value c is chosen as the smallest value where the approximation evaluates to exactly 1. 
In the range [-0.0004, 0.0004] the approximation tanh(x) ~= x is used for better accuracy as x tends to zero. This implementation works on both scalars and packets. */
template T generic_fast_tanh_float(const T& a_x) {
  // Clamp the inputs to the range [-c, c]
  // NOTE(review): the clamp constant differs with FMA because the rational
  // approximant saturates to exactly 1 at a slightly different point when
  // evaluated with fused multiply-adds.
#ifdef EIGEN_VECTORIZE_FMA
  const T plus_clamp = pset1(7.99881172180175781f);
  const T minus_clamp = pset1(-7.99881172180175781f);
#else
  const T plus_clamp = pset1(7.90531110763549805f);
  const T minus_clamp = pset1(-7.90531110763549805f);
#endif
  // Below this magnitude the identity tanh(x) ~= x is used instead of the
  // rational approximant (selected via tiny_mask at the end).
  const T tiny = pset1(0.0004f);
  const T x = pmax(pmin(a_x, plus_clamp), minus_clamp);
  const T tiny_mask = pcmp_lt(pabs(a_x), tiny);
  // The monomial coefficients of the numerator polynomial (odd).
  const T alpha_1 = pset1(4.89352455891786e-03f);
  const T alpha_3 = pset1(6.37261928875436e-04f);
  const T alpha_5 = pset1(1.48572235717979e-05f);
  const T alpha_7 = pset1(5.12229709037114e-08f);
  const T alpha_9 = pset1(-8.60467152213735e-11f);
  const T alpha_11 = pset1(2.00018790482477e-13f);
  const T alpha_13 = pset1(-2.76076847742355e-16f);
  // The monomial coefficients of the denominator polynomial (even).
  const T beta_0 = pset1(4.89352518554385e-03f);
  const T beta_2 = pset1(2.26843463243900e-03f);
  const T beta_4 = pset1(1.18534705686654e-04f);
  const T beta_6 = pset1(1.19825839466702e-06f);
  // Since the polynomials are odd/even, we need x^2.
  const T x2 = pmul(x, x);
  // Evaluate the numerator polynomial p: Horner's scheme in x^2, with one
  // final multiply by x restoring the odd parity.
  T p = pmadd(x2, alpha_13, alpha_11);
  p = pmadd(x2, p, alpha_9);
  p = pmadd(x2, p, alpha_7);
  p = pmadd(x2, p, alpha_5);
  p = pmadd(x2, p, alpha_3);
  p = pmadd(x2, p, alpha_1);
  p = pmul(x, p);
  // Evaluate the denominator polynomial q (even powers only).
  T q = pmadd(x2, beta_6, beta_4);
  q = pmadd(x2, q, beta_2);
  q = pmadd(x2, q, beta_0);
  // Divide the numerator by the denominator; fall back to the identity
  // tanh(x) ~= x for tiny inputs.
  return pselect(tiny_mask, x, pdiv(p, q));
}

// Overflow-safe sqrt(x^2 + y^2) for non-negative real inputs.
// (This definition continues past the current chunk boundary.)
template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) {
  // IEEE IEC 6059 special cases.
  if ((numext::isinf)(x) || (numext::isinf)(y))
    return NumTraits::infinity();
  if ((numext::isnan)(x) || (numext::isnan)(y))
    return NumTraits::quiet_NaN();
  EIGEN_USING_STD(sqrt);
  RealScalar p, qp;
  // Factor out the larger magnitude so the ratio qp is at most 1; this keeps
  // qp*qp from overflowing or underflowing prematurely.
  p = numext::maxi(x,y);
  if(p==RealScalar(0)) return RealScalar(0);
  qp = numext::mini(y,x) / p;
  return p * sqrt(RealScalar(1) + qp*qp);
}

// Dispatch helper: reduces hypot on a (possibly complex) Scalar to the
// real-valued positive_real_hypot above via abs().
template struct hypot_impl {
  typedef typename NumTraits::Real RealScalar;
  static EIGEN_DEVICE_FUNC
  inline RealScalar run(const Scalar& x, const Scalar& y)
  {
    EIGEN_USING_STD(abs);
    return positive_real_hypot(abs(x), abs(y));
  }
};

// Generic complex sqrt implementation that correctly handles corner cases
// according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt
template EIGEN_DEVICE_FUNC std::complex complex_sqrt(const std::complex& z) {
  // Computes the principal sqrt of the input.
  //
  // For a complex square root of the number x + i*y. We want to find real
  // numbers u and v such that
  //    (u + i*v)^2 = x + i*y  <=>
  //    u^2 - v^2 + i*2*u*v = x + i*v.
  // By equating the real and imaginary parts we get:
  //    u^2 - v^2 = x
  //    2*u*v = y.
  //
  // For x >= 0, this has the numerically stable solution
  //    u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
  //    v = y / (2 * u)
  // and for x < 0,
  //    v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
  //    u = y / (2 * v)
  //
  // Letting w = sqrt(0.5 * (|x| + |z|)),
  //   if x == 0: u = w, v = sign(y) * w
  //   if x > 0:  u = w, v = y / (2 * w)
  //   if x < 0:  u = |y| / (2 * w), v = sign(y) * w
  const T x = numext::real(z);
  const T y = numext::imag(z);
  const T zero = T(0);
  const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y)));
  // An infinite imaginary part dominates every other case (result is
  // (+inf, y)); otherwise pick the branch for x == 0, x > 0 or x < 0 per the
  // derivation above, with v's sign copied from y.
  return
    (numext::isinf)(y) ? std::complex(NumTraits::infinity(), y)
      : x == zero ? std::complex(w, y < zero ? -w : w)
      : x > zero ? std::complex(w, y / (2 * w))
      : std::complex(numext::abs(y) / (2 * w), y < zero ? -w : w );
}

// Generic complex rsqrt implementation.
template EIGEN_DEVICE_FUNC std::complex complex_rsqrt(const std::complex& z) {
  // Computes the principal reciprocal sqrt of the input.
  //
  // For a complex reciprocal square root of the number z = x + i*y. We want to
  // find real numbers u and v such that
  //    (u + i*v)^2 = 1 / (x + i*y)  <=>
  //    u^2 - v^2 + i*2*u*v = x/|z|^2 - i*v/|z|^2.
  // By equating the real and imaginary parts we get:
  //    u^2 - v^2 = x/|z|^2
  //    2*u*v = y/|z|^2.
  //
  // For x >= 0, this has the numerically stable solution
  //    u = sqrt(0.5 * (x + |z|)) / |z|
  //    v = -y / (2 * u * |z|)
  // and for x < 0,
  //    v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z|
  //    u = -y / (2 * v * |z|)
  //
  // Letting w = sqrt(0.5 * (|x| + |z|)),
  //   if x == 0: u = w / |z|, v = -sign(y) * w / |z|
  //   if x > 0:  u = w / |z|, v = -y / (2 * w * |z|)
  //   if x < 0:  u = |y| / (2 * w * |z|), v = -sign(y) * w / |z|
  const T x = numext::real(z);
  const T y = numext::imag(z);
  const T zero = T(0);
  const T abs_z = numext::hypot(x, y);
  const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z));
  const T woz = w / abs_z;
  // Corner cases consistent with 1/sqrt(z) on gcc/clang.
  // z == 0 yields (+inf, NaN); an infinite component yields exactly zero;
  // otherwise select the branch from the derivation above, with the sign of
  // the imaginary part opposite to y's.
  return
    abs_z == zero ? std::complex(NumTraits::infinity(), NumTraits::quiet_NaN())
      : ((numext::isinf)(x) || (numext::isinf)(y)) ? std::complex(zero, zero)
      : x == zero ? std::complex(woz, y < zero ? woz : -woz)
      : x > zero ? std::complex(woz, -y / (2 * w * abs_z))
      : std::complex(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz );
}

// Complex logarithm: log(z) = log(|z|) + i*arg(z).
// (This definition continues past the current chunk boundary.)
template EIGEN_DEVICE_FUNC std::complex complex_log(const std::complex& z) {
  // Computes complex log.
T a = numext::abs(z); EIGEN_USING_STD(atan2); T b = atan2(z.imag(), z.real()); return std::complex(numext::log(a), b); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONSIMPL_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Matrix.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_H #define EIGEN_MATRIX_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > { private: enum { size = internal::size_at_compile_time::ret }; typedef typename find_best_packet::type PacketScalar; enum { row_major_bit = Options_&RowMajor ? RowMajorBit : 0, is_dynamic_size_storage = MaxRows_==Dynamic || MaxCols_==Dynamic, max_size = is_dynamic_size_storage ? Dynamic : MaxRows_*MaxCols_, default_alignment = compute_default_alignment::value, actual_alignment = ((Options_&DontAlign)==0) ? default_alignment : 0, required_alignment = unpacket_traits::alignment, packet_access_bit = (packet_traits::Vectorizable && (EIGEN_UNALIGNED_VECTORIZE || (actual_alignment>=required_alignment))) ? PacketAccessBit : 0 }; public: typedef Scalar_ Scalar; typedef Dense StorageKind; typedef Eigen::Index StorageIndex; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Rows_, ColsAtCompileTime = Cols_, MaxRowsAtCompileTime = MaxRows_, MaxColsAtCompileTime = MaxCols_, Flags = compute_matrix_flags::ret, Options = Options_, InnerStrideAtCompileTime = 1, OuterStrideAtCompileTime = (Options&RowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime, // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit, Alignment = actual_alignment }; }; } /** \class Matrix * \ingroup Core_Module * * \brief The matrix class, also used for vectors and row-vectors * * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. * Vectors are matrices with one column, and row-vectors are matrices with one row. * * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). * * The first three template parameters are required: * \tparam Scalar_ Numeric type, e.g. float, double, int or std::complex. * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). * \tparam Rows_ Number of rows, or \b Dynamic * \tparam Cols_ Number of columns, or \b Dynamic * * The remaining template parameters are optional -- in most cases you don't have to worry about them. * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either * \b #AutoAlign or \b #DontAlign. * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. * \tparam MaxRows_ Maximum number of rows. Defaults to \a Rows_ (\ref maxrows "note"). * \tparam MaxCols_ Maximum number of columns. Defaults to \a Cols_ (\ref maxrows "note"). * * Eigen provides a number of typedefs covering the usual cases. 
Here are some examples: * * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix) * \li \c Vector4f is a vector of 4 floats (\c Matrix) * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix) * * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix) * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix) * * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix) * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix) * * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. * * You can access elements of vectors and matrices using normal subscripting: * * \code * Eigen::VectorXd v(10); * v[0] = 0.1; * v[1] = 0.2; * v(0) = 0.3; * v(1) = 0.4; * * Eigen::MatrixXi m(10, 10); * m(0, 1) = 1; * m(0, 2) = 2; * m(0, 3) = 3; * \endcode * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. * * Some notes: * *
*
\anchor dense Dense versus sparse:
*
This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module. * * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary contiguous array. * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.
* *
\anchor fixedsize Fixed-size versus dynamic-size:
*
Fixed-size means that the numbers of rows and columns are known are compile-time. In this case, Eigen allocates the array * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time. * * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime * variables, and the array of coefficients is allocated dynamically on the heap. * * Note that \em dense matrices, be they Fixed-size or Dynamic-size, do not expand dynamically in the sense of a std::map. * If you want this behavior, see the Sparse module.
* *
\anchor maxrows MaxRows_ and MaxCols_:
*
In most cases, one just leaves these parameters to the default values. * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases * when the exact numbers of rows and columns are not known are compile-time, but it is known at compile-time that they cannot * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case MaxRows_ and MaxCols_ * are the dimensions of the original matrix, while Rows_ and Cols_ are Dynamic.
*
* * ABI and storage layout * * The table below summarizes the ABI of some possible Matrix instances which is fixed thorough the lifetime of Eigen 3. * * * * * * *
Matrix typeEquivalent C structure
\code Matrix \endcode\code * struct { * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 * Eigen::Index rows, cols; * }; * \endcode
\code * Matrix * Matrix \endcode\code * struct { * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 * Eigen::Index size; * }; * \endcode
\code Matrix \endcode\code * struct { * T data[Rows*Cols]; // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0 * }; * \endcode
\code Matrix \endcode\code * struct { * T data[MaxRows*MaxCols]; // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0 * Eigen::Index rows, cols; * }; * \endcode
* Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined to the largest possible power-of-two * smaller to EIGEN_MAX_STATIC_ALIGN_BYTES. * * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, * \ref TopicStorageOrders */ template class Matrix : public PlainObjectBase > { public: /** \brief Base class typedef. * \sa PlainObjectBase */ typedef PlainObjectBase Base; enum { Options = Options_ }; EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) typedef typename Base::PlainObject PlainObject; using Base::base; using Base::coeffRef; /** * \brief Assigns matrices to each other. * * \note This is a special case of the templated operator=. Its purpose is * to prevent a default operator= from hiding the templated operator=. * * \callgraph */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) { return Base::_set(other); } /** \internal * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase& other) { return Base::_set(other); } /* Here, doxygen failed to copy the brief information when using \copydoc */ /** * \brief Copies the generic expression \a other into *this. * \copydetails DenseBase::operator=(const EigenBase &other) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase &other) { return Base::operator=(other); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue& func) { return Base::operator=(func); } /** \brief Default constructor. 
* * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix() : Base() { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } // FIXME is it still needed EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible::value) : Base(std::move(other)) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable::value) { Base::operator=(std::move(other)); return *this; } #endif #if EIGEN_HAS_CXX11 /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... args) * * Example: \include Matrix_variadic_ctor_cxx11.cpp * Output: \verbinclude Matrix_variadic_ctor_cxx11.out * * \sa Matrix(const std::initializer_list>&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) : Base(a0, a1, a2, a3, args...) {} /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. 
\cpp11 * * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients: * * Example: \include Matrix_initializer_list_23_cxx11.cpp * Output: \verbinclude Matrix_initializer_list_23_cxx11.out * * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is triggered. * * In the case of a compile-time column vector, implicit transposition from a single row is allowed. * Therefore VectorXd{{1,2,3,4,5}} is legal and the more verbose syntax * RowVectorXd{{1},{2},{3},{4},{5}} can be avoided: * * Example: \include Matrix_initializer_list_vector_cxx11.cpp * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out * * In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes, * and implicit transposition is allowed for compile-time vectors only. * * \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args) */ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Matrix(const std::initializer_list>& list) : Base(list) {} #endif // end EIGEN_HAS_CXX11 #ifndef EIGEN_PARSED_BY_DOXYGEN // This constructor is for both 1x1 matrices and dynamic vectors template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(const T& x) { Base::template _init1(x); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) { Base::template _init2(x, y); } #else /** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */ EIGEN_DEVICE_FUNC explicit Matrix(const Scalar *data); /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors * * This is useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. * * \warning This constructor is disabled for fixed-size \c 1x1 matrices. 
For instance, * calling Matrix(1) will call the initialization constructor: Matrix(const Scalar&). * For fixed-size \c 1x1 matrices it is therefore recommended to use the default * constructor Matrix() instead, especially when using one of the non standard * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). */ EIGEN_STRONG_INLINE explicit Matrix(Index dim); /** \brief Constructs an initialized 1x1 matrix with the given coefficient * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ Matrix(const Scalar& x); /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. * * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. * * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance, * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y). * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default * constructor Matrix() instead, especially when using one of the non standard * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). */ EIGEN_DEVICE_FUNC Matrix(Index rows, Index cols); /** \brief Constructs an initialized 2D vector with given coefficients * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ Matrix(const Scalar& x, const Scalar& y); #endif // end EIGEN_PARSED_BY_DOXYGEN /** \brief Constructs an initialized 3D vector with given coefficients * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; } /** \brief Constructs an initialized 4D vector with given coefficients * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; m_storage.data()[3] = w; } /** \brief Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other) { } /** \brief Copy constructor for generic expressions. * \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const EigenBase &other) : Base(other.derived()) { } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); } /////////// Geometry module /////////// template EIGEN_DEVICE_FUNC explicit Matrix(const RotationBase& r); template EIGEN_DEVICE_FUNC Matrix& operator=(const RotationBase& r); // allow to extend Matrix outside Eigen #ifdef EIGEN_MATRIX_PLUGIN #include EIGEN_MATRIX_PLUGIN #endif protected: template friend struct internal::conservative_resize_like_impl; using Base::m_storage; }; /** \defgroup matrixtypedefs Global matrix typedefs * * \ingroup Core_Module * * %Eigen defines several typedef shortcuts for most common matrix and vector types. 
* * The general patterns are the following: * * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. * * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is * a fixed-size vector of 4 complex floats. * * With \cpp11, template alias are also defined for common sizes. * They follow the same pattern as above except that the scalar type suffix is replaced by a * template parameter, i.e.: * - `MatrixSize` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size. * - `MatrixXSize` and `MatrixSizeX` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices. * - `VectorSize` and `RowVectorSize` for column and row vectors. * * With \cpp11, you can also use fully generic column and row vector types: `Vector` and `RowVector`. 
* * \sa class Matrix */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix Vector##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix RowVector##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##Size##X##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix Matrix##X##Size##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS #undef EIGEN_MAKE_FIXED_TYPEDEFS #if EIGEN_HAS_CXX11 #define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \ /** \ingroup matrixtypedefs */ \ /** \brief \cpp11 */ \ template \ using Matrix##SizeSuffix = Matrix; \ /** \ingroup matrixtypedefs */ \ /** \brief \cpp11 */ \ template \ using Vector##SizeSuffix = Matrix; \ /** \ingroup matrixtypedefs */ \ /** \brief \cpp11 */ \ template \ using RowVector##SizeSuffix = Matrix; #define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \ /** \ingroup matrixtypedefs */ \ /** \brief \cpp11 */ \ template \ using Matrix##Size##X = Matrix; \ /** \ingroup matrixtypedefs */ \ /** \brief \cpp11 */ \ template \ using Matrix##X##Size = Matrix; EIGEN_MAKE_TYPEDEFS(2, 2) EIGEN_MAKE_TYPEDEFS(3, 3) EIGEN_MAKE_TYPEDEFS(4, 4) EIGEN_MAKE_TYPEDEFS(Dynamic, X) 
EIGEN_MAKE_FIXED_TYPEDEFS(2) EIGEN_MAKE_FIXED_TYPEDEFS(3) EIGEN_MAKE_FIXED_TYPEDEFS(4) /** \ingroup matrixtypedefs * \brief \cpp11 */ template using Vector = Matrix; /** \ingroup matrixtypedefs * \brief \cpp11 */ template using RowVector = Matrix; #undef EIGEN_MAKE_TYPEDEFS #undef EIGEN_MAKE_FIXED_TYPEDEFS #endif // EIGEN_HAS_CXX11 } // end namespace Eigen #endif // EIGEN_MATRIX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/MatrixBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2009 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXBASE_H #define EIGEN_MATRIXBASE_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class MatrixBase * \ingroup Core_Module * * \brief Base class for all dense matrices, vectors, and expressions * * This class is the base that is inherited by all matrix, vector, and related expression * types. Most of the Eigen API is contained in this class, and its base classes. Other important * classes for the Eigen API are Matrix, and VectorwiseOp. * * Note that some methods are defined in other modules such as the \ref LU_Module LU module * for all functions related to matrix inversions. * * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. * * When writing a function taking Eigen objects as argument, if you want your function * to take as argument any matrix, vector, or expression, just let it take a * MatrixBase argument. As an example, here is a function printFirstRow which, given * a matrix, vector, or expression \a x, prints the first row of \a x. 
* * \code template void printFirstRow(const Eigen::MatrixBase& x) { cout << x.row(0) << endl; } * \endcode * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. * * \sa \blank \ref TopicClassHierarchy */ template class MatrixBase : public DenseBase { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::eval; using Base::operator-; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; typedef typename Base::RowXpr RowXpr; typedef typename Base::ColXpr ColXpr; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns the size of the main diagonal, which is min(rows(),cols()). * \sa rows(), cols(), SizeAtCompileTime. 
*/ EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(),cols()); } typedef typename Base::PlainObject PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,PlainObject> ConstantReturnType; /** \internal the return type of MatrixBase::adjoint() */ typedef typename internal::conditional::IsComplex, CwiseUnaryOp, ConstTransposeReturnType>, ConstTransposeReturnType >::type AdjointReturnType; /** \internal Return type of eigenvalues() */ typedef Matrix, internal::traits::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; /** \internal the return type of identity */ typedef CwiseNullaryOp,PlainObject> IdentityReturnType; /** \internal the return type of unit vectors */ typedef Block, SquareMatrixType>, internal::traits::RowsAtCompileTime, internal::traits::ColsAtCompileTime> BasisReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # ifdef EIGEN_MATRIXBASE_PLUGIN # include EIGEN_MATRIXBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_UNARY_ADDONS /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other); // We cannot inherit here via Base::operator= since it is causing // trouble with MSVC. 
template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); template EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase& other); template EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue& other); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase& other); template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase& other); template EIGEN_DEVICE_FUNC const Product operator*(const MatrixBase &other) const; template EIGEN_DEVICE_FUNC const Product lazyProduct(const MatrixBase &other) const; template Derived& operator*=(const EigenBase& other); template void applyOnTheLeft(const EigenBase& other); template void applyOnTheRight(const EigenBase& other); template EIGEN_DEVICE_FUNC const Product operator*(const DiagonalBase &diagonal) const; template EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits::Scalar,typename internal::traits::Scalar>::ReturnType dot(const MatrixBase& other) const; EIGEN_DEVICE_FUNC RealScalar squaredNorm() const; EIGEN_DEVICE_FUNC RealScalar norm() const; RealScalar stableNorm() const; RealScalar blueNorm() const; RealScalar hypotNorm() const; EIGEN_DEVICE_FUNC const PlainObject normalized() const; EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const; EIGEN_DEVICE_FUNC void normalize(); EIGEN_DEVICE_FUNC void stableNormalize(); EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const; EIGEN_DEVICE_FUNC void adjointInPlace(); typedef Diagonal DiagonalReturnType; EIGEN_DEVICE_FUNC DiagonalReturnType diagonal(); typedef typename internal::add_const >::type ConstDiagonalReturnType; EIGEN_DEVICE_FUNC ConstDiagonalReturnType diagonal() const; template struct DiagonalIndexReturnType { typedef Diagonal Type; }; template struct ConstDiagonalIndexReturnType { typedef const Diagonal Type; }; template EIGEN_DEVICE_FUNC typename DiagonalIndexReturnType::Type diagonal(); template EIGEN_DEVICE_FUNC typename ConstDiagonalIndexReturnType::Type diagonal() const; 
typedef Diagonal DiagonalDynamicIndexReturnType; typedef typename internal::add_const >::type ConstDiagonalDynamicIndexReturnType; EIGEN_DEVICE_FUNC DiagonalDynamicIndexReturnType diagonal(Index index); EIGEN_DEVICE_FUNC ConstDiagonalDynamicIndexReturnType diagonal(Index index) const; template struct TriangularViewReturnType { typedef TriangularView Type; }; template struct ConstTriangularViewReturnType { typedef const TriangularView Type; }; template EIGEN_DEVICE_FUNC typename TriangularViewReturnType::Type triangularView(); template EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType::Type triangularView() const; template struct SelfAdjointViewReturnType { typedef SelfAdjointView Type; }; template struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView Type; }; template EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType::Type selfadjointView(); template EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; const SparseView sparseView(const Scalar& m_reference = Scalar(0), const typename NumTraits::Real& m_epsilon = NumTraits::dummy_precision()) const; EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(); EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols); EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i); EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i); EIGEN_DEVICE_FUNC static const BasisReturnType UnitX(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitY(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitW(); EIGEN_DEVICE_FUNC const DiagonalWrapper asDiagonal() const; const PermutationWrapper asPermutation() const; EIGEN_DEVICE_FUNC Derived& setIdentity(); EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols); EIGEN_DEVICE_FUNC Derived& setUnit(Index i); EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i); bool isIdentity(const RealScalar& prec = 
NumTraits::dummy_precision()) const; bool isDiagonal(const RealScalar& prec = NumTraits::dummy_precision()) const; bool isUpperTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; bool isLowerTriangular(const RealScalar& prec = NumTraits::dummy_precision()) const; template bool isOrthogonal(const MatrixBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const; bool isUnitary(const RealScalar& prec = NumTraits::dummy_precision()) const; /** \returns true if each coefficients of \c *this and \a other are all exactly equal. * \warning When using floating point scalar values you probably should rather use a * fuzzy comparison such as isApprox() * \sa isApprox(), operator!= */ template EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase& other) const { return cwiseEqual(other).all(); } /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other. * \warning When using floating point scalar values you probably should rather use a * fuzzy comparison such as isApprox() * \sa isApprox(), operator== */ template EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase& other) const { return cwiseNotEqual(other).any(); } NoAlias EIGEN_DEVICE_FUNC noalias(); // TODO forceAlignedAccess is temporarily disabled // Need to find a nicer workaround. 
inline const Derived& forceAlignedAccess() const { return derived(); } inline Derived& forceAlignedAccess() { return derived(); } template inline const Derived& forceAlignedAccessIf() const { return derived(); } template inline Derived& forceAlignedAccessIf() { return derived(); } EIGEN_DEVICE_FUNC Scalar trace() const; template EIGEN_DEVICE_FUNC RealScalar lpNorm() const; EIGEN_DEVICE_FUNC MatrixBase& matrix() { return *this; } EIGEN_DEVICE_FUNC const MatrixBase& matrix() const { return *this; } /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix * \sa ArrayBase::matrix() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper array() { return ArrayWrapper(derived()); } /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix * \sa ArrayBase::matrix() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper array() const { return ArrayWrapper(derived()); } /////////// LU module /////////// inline const FullPivLU fullPivLu() const; inline const PartialPivLU partialPivLu() const; inline const PartialPivLU lu() const; EIGEN_DEVICE_FUNC inline const Inverse inverse() const; template inline void computeInverseAndDetWithCheck( ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible, const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() ) const; template inline void computeInverseWithCheck( ResultType& inverse, bool& invertible, const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() ) const; EIGEN_DEVICE_FUNC Scalar determinant() const; /////////// Cholesky module /////////// inline const LLT llt() const; inline const LDLT ldlt() const; /////////// QR module /////////// inline const HouseholderQR householderQr() const; inline const ColPivHouseholderQR colPivHouseholderQr() const; inline const FullPivHouseholderQR fullPivHouseholderQr() const; inline const CompleteOrthogonalDecomposition completeOrthogonalDecomposition() const; /////////// Eigenvalues 
module /////////// inline EigenvaluesReturnType eigenvalues() const; inline RealScalar operatorNorm() const; /////////// SVD module /////////// inline JacobiSVD jacobiSvd(unsigned int computationOptions = 0) const; inline BDCSVD bdcSvd(unsigned int computationOptions = 0) const; /////////// Geometry module /////////// #ifndef EIGEN_PARSED_BY_DOXYGEN /// \internal helper struct to form the return type of the cross product template struct cross_product_return_type { typedef typename ScalarBinaryOpTraits::Scalar,typename internal::traits::Scalar>::ReturnType Scalar; typedef Matrix type; }; #endif // EIGEN_PARSED_BY_DOXYGEN template EIGEN_DEVICE_FUNC #ifndef EIGEN_PARSED_BY_DOXYGEN inline typename cross_product_return_type::type #else inline PlainObject #endif cross(const MatrixBase& other) const; template EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase& other) const; EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const; EIGEN_DEVICE_FUNC inline Matrix eulerAngles(Index a0, Index a1, Index a2) const; // put this as separate enum value to work around possible GCC 4.3 bug (?) enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1&&RowsAtCompileTime==1 ? ((internal::traits::Flags&RowMajorBit)==RowMajorBit ? Horizontal : Vertical) : ColsAtCompileTime==1 ? Vertical : Horizontal }; typedef Homogeneous HomogeneousReturnType; EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const; enum { SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 }; typedef Block::ColsAtCompileTime==1 ? SizeMinusOne : 1, internal::traits::ColsAtCompileTime==1 ? 
1 : SizeMinusOne> ConstStartMinusOne; typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne,Scalar,quotient) HNormalizedReturnType; EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const; ////////// Householder module /////////// EIGEN_DEVICE_FUNC void makeHouseholderInPlace(Scalar& tau, RealScalar& beta); template EIGEN_DEVICE_FUNC void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const; template EIGEN_DEVICE_FUNC void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau, Scalar* workspace); template EIGEN_DEVICE_FUNC void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau, Scalar* workspace); ///////// Jacobi module ///////// template EIGEN_DEVICE_FUNC void applyOnTheLeft(Index p, Index q, const JacobiRotation& j); template EIGEN_DEVICE_FUNC void applyOnTheRight(Index p, Index q, const JacobiRotation& j); ///////// SparseCore module ///////// template EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type cwiseProduct(const SparseMatrixBase &other) const { return other.cwiseProduct(derived()); } ///////// MatrixFunctions module ///////// typedef typename internal::stem_function::type StemFunction; #define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the coefficient-wise Description use ArrayBase::##Name . */ \ const ReturnType Name() const; #define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the unsupported MatrixFunctions module. To compute the coefficient-wise Description use ArrayBase::##Name . 
*/ \ const ReturnType Name(Argument) const; EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential) /** \brief Helper function for the unsupported MatrixFunctions module.*/ const MatrixFunctionReturnValue matrixFunction(StemFunction f) const; EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine) #if EIGEN_HAS_CXX11_MATH EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine) #endif EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine) EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root) EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm) EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p) EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex& p) protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase) private: EIGEN_DEVICE_FUNC explicit MatrixBase(int); EIGEN_DEVICE_FUNC MatrixBase(int,int); template EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase&); protected: // mixing arrays and matrices is not legal template Derived& operator+=(const ArrayBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template Derived& operator-=(const ArrayBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /*************************************************************************** * Implementation of matrix base methods 
***************************************************************************/ /** replaces \c *this by \c *this * \a other. * * \returns a reference to \c *this * * Example: \include MatrixBase_applyOnTheRight.cpp * Output: \verbinclude MatrixBase_applyOnTheRight.out */ template template inline Derived& MatrixBase::operator*=(const EigenBase &other) { other.derived().applyThisOnTheRight(derived()); return derived(); } /** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). * * Example: \include MatrixBase_applyOnTheRight.cpp * Output: \verbinclude MatrixBase_applyOnTheRight.out */ template template inline void MatrixBase::applyOnTheRight(const EigenBase &other) { other.derived().applyThisOnTheRight(derived()); } /** replaces \c *this by \a other * \c *this. * * Example: \include MatrixBase_applyOnTheLeft.cpp * Output: \verbinclude MatrixBase_applyOnTheLeft.out */ template template inline void MatrixBase::applyOnTheLeft(const EigenBase &other) { other.derived().applyThisOnTheLeft(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIXBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/NestByValue.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_NESTBYVALUE_H #define EIGEN_NESTBYVALUE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : public traits { enum { Flags = traits::Flags & ~NestByRefBit }; }; } /** \class NestByValue * \ingroup Core_Module * * \brief Expression which must be nested by value * * \tparam ExpressionType the type of the object of which we are requiring nesting-by-value * * This class is the return type of MatrixBase::nestByValue() * and most of the time this is the only way it is used. * * \sa MatrixBase::nestByValue() */ template class NestByValue : public internal::dense_xpr_base< NestByValue >::type { public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); } EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } EIGEN_DEVICE_FUNC const ExpressionType& nestedExpression() const { return m_expression; } protected: const ExpressionType m_expression; }; /** \returns an expression of the temporary version of *this. 
*/ template EIGEN_DEVICE_FUNC inline const NestByValue DenseBase::nestByValue() const { return NestByValue(derived()); } namespace internal { // Evaluator of Solve -> eval into a temporary template struct evaluator > : public evaluator { typedef evaluator Base; EIGEN_DEVICE_FUNC explicit evaluator(const NestByValue& xpr) : Base(xpr.nestedExpression()) {} }; } } // end namespace Eigen #endif // EIGEN_NESTBYVALUE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/NoAlias.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NOALIAS_H #define EIGEN_NOALIAS_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class NoAlias * \ingroup Core_Module * * \brief Pseudo expression providing an operator = assuming no aliasing * * \tparam ExpressionType the type of the object on which to do the lazy assignment * * This class represents an expression with special assignment operators * assuming no aliasing between the target expression and the source expression. * More precisely it alloas to bypass the EvalBeforeAssignBit flag of the source expression. * It is the return type of MatrixBase::noalias() * and most of the time this is the only way it is used. 
* * \sa MatrixBase::noalias() */ template class StorageBase> class NoAlias { public: typedef typename ExpressionType::Scalar Scalar; EIGEN_DEVICE_FUNC explicit NoAlias(ExpressionType& expression) : m_expression(expression) {} template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) { call_assignment_no_alias(m_expression, other.derived(), internal::assign_op()); return m_expression; } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase& other) { call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op()); return m_expression; } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase& other) { call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op()); return m_expression; } EIGEN_DEVICE_FUNC ExpressionType& expression() const { return m_expression; } protected: ExpressionType& m_expression; }; /** \returns a pseudo expression of \c *this with an operator= assuming * no aliasing between \c *this and the source expression. * * More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag. * Currently, even though several expressions may alias, only product * expressions have this flag. Therefore, noalias() is only useful when * the source expression contains a matrix product. * * Here are some examples where noalias is useful: * \code * D.noalias() = A * B; * D.noalias() += A.transpose() * B; * D.noalias() -= 2 * A * B.adjoint(); * \endcode * * On the other hand the following example will lead to a \b wrong result: * \code * A.noalias() = A * B; * \endcode * because the result matrix A is also an operand of the matrix product. 
Therefore, * there is no alternative than evaluating A * B in a temporary, that is the default * behavior when you write: * \code * A = A * B; * \endcode * * \sa class NoAlias */ template NoAlias EIGEN_DEVICE_FUNC MatrixBase::noalias() { return NoAlias(derived()); } } // end namespace Eigen #endif // EIGEN_NOALIAS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/NumTraits.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMTRAITS_H #define EIGEN_NUMTRAITS_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { // default implementation of digits10(), based on numeric_limits if specialized, // 0 for integer types, and log10(epsilon()) otherwise. template< typename T, bool use_numeric_limits = std::numeric_limits::is_specialized, bool is_integer = NumTraits::IsInteger> struct default_digits10_impl { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return std::numeric_limits::digits10; } }; template struct default_digits10_impl // Floating point { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { using std::log10; using std::ceil; typedef typename NumTraits::Real Real; return int(ceil(-log10(NumTraits::epsilon()))); } }; template struct default_digits10_impl // Integer { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return 0; } }; // default implementation of digits(), based on numeric_limits if specialized, // 0 for integer types, and log2(epsilon()) otherwise. 
template< typename T, bool use_numeric_limits = std::numeric_limits::is_specialized, bool is_integer = NumTraits::IsInteger> struct default_digits_impl { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return std::numeric_limits::digits; } }; template struct default_digits_impl // Floating point { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { using std::log; using std::ceil; typedef typename NumTraits::Real Real; return int(ceil(-log(NumTraits::epsilon())/log(static_cast(2)))); } }; template struct default_digits_impl // Integer { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static int run() { return 0; } }; } // end namespace internal namespace numext { /** \internal bit-wise cast without changing the underlying bit representation. */ // TODO: Replace by std::bit_cast (available in C++20) template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) { #if EIGEN_HAS_TYPE_TRAITS // The behaviour of memcpy is not specified for non-trivially copyable types EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value, THIS_TYPE_IS_NOT_SUPPORTED); EIGEN_STATIC_ASSERT(std::is_trivially_copyable::value && std::is_default_constructible::value, THIS_TYPE_IS_NOT_SUPPORTED); #endif EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED); Tgt tgt; // Load src into registers first. This allows the memcpy to be elided by CUDA. const Src staged = src; EIGEN_USING_STD(memcpy) memcpy(&tgt, &staged, sizeof(Tgt)); return tgt; } } // namespace numext /** \class NumTraits * \ingroup Core_Module * * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. * * \tparam T the numeric type at hand * * This class stores enums, typedefs and static methods giving information about a numeric type. * * The provided data consists of: * \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real, * then \c Real is just a typedef to \a T. If \a T is \c std::complex then \c Real * is a typedef to \a U. 
* \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values, * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is * only intended as a helper for code that needs to explicitly promote types. * \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for \c std::complex, Literal is defined as \c U. * Of course, this type must be fully compatible with \a T. In doubt, just use \a T here. * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what * this means, just use \a T here. * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex * type, and to 0 otherwise. * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, * and to \c 0 otherwise. * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed * to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers. * Stay vague here. No need to do architecture-specific stuff. If you don't know what this means, just use \c Eigen::HugeCost. * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned. * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise. * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), * it returns a \a Real instead of a \a T. * \li A dummy_precision() function returning a weak epsilon value. 
It is mainly used as a default * value by the fuzzy comparison operators. * \li highest() and lowest() functions returning the highest and lowest possible values respectively. * \li digits() function returning the number of radix digits (non-sign digits for integers, mantissa for floating-point). This is * the analogue of std::numeric_limits::digits * which is used as the default implementation if specialized. * \li digits10() function returning the number of decimal digits that can be represented without change. This is * the analogue of std::numeric_limits::digits10 * which is used as the default implementation if specialized. * \li min_exponent() and max_exponent() functions returning the highest and lowest possible values, respectively, * such that the radix raised to the power exponent-1 is a normalized floating-point number. These are equivalent to * std::numeric_limits::min_exponent/ * std::numeric_limits::max_exponent. * \li infinity() function returning a representation of positive infinity, if available. * \li quiet_NaN function returning a non-signaling "not-a-number", if available. */ template struct GenericNumTraits { enum { IsInteger = std::numeric_limits::is_integer, IsSigned = std::numeric_limits::is_signed, IsComplex = 0, RequireInitialization = internal::is_arithmetic::value ? 
0 : 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; typedef T Real; typedef typename internal::conditional< IsInteger, typename internal::conditional::type, T >::type NonInteger; typedef T Nested; typedef T Literal; EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real epsilon() { return numext::numeric_limits::epsilon(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits10() { return internal::default_digits10_impl::run(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits() { return internal::default_digits_impl::run(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int min_exponent() { return numext::numeric_limits::min_exponent; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int max_exponent() { return numext::numeric_limits::max_exponent; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real dummy_precision() { // make sure to override this for floating-point types return Real(0); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T highest() { return (numext::numeric_limits::max)(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T lowest() { return IsInteger ? 
(numext::numeric_limits::min)() : static_cast(-(numext::numeric_limits::max)()); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T infinity() { return numext::numeric_limits::infinity(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline T quiet_NaN() { return numext::numeric_limits::quiet_NaN(); } }; template struct NumTraits : GenericNumTraits {}; template<> struct NumTraits : GenericNumTraits { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline float dummy_precision() { return 1e-5f; } }; template<> struct NumTraits : GenericNumTraits { EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline double dummy_precision() { return 1e-12; } }; template<> struct NumTraits : GenericNumTraits { EIGEN_CONSTEXPR static inline long double dummy_precision() { return 1e-15l; } }; template struct NumTraits > : GenericNumTraits > { typedef Real_ Real; typedef typename NumTraits::Literal Literal; enum { IsComplex = 1, RequireInitialization = NumTraits::RequireInitialization, ReadCost = 2 * NumTraits::ReadCost, AddCost = 2 * NumTraits::AddCost, MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost }; EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real epsilon() { return NumTraits::epsilon(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline Real dummy_precision() { return NumTraits::dummy_precision(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline int digits10() { return NumTraits::digits10(); } }; template struct NumTraits > { typedef Array ArrayType; typedef typename NumTraits::Real RealScalar; typedef Array Real; typedef typename NumTraits::NonInteger NonIntegerScalar; typedef Array NonInteger; typedef ArrayType & Nested; typedef typename NumTraits::Literal Literal; enum { IsComplex = NumTraits::IsComplex, IsInteger = NumTraits::IsInteger, IsSigned = NumTraits::IsSigned, RequireInitialization = 1, ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::ReadCost), AddCost = ArrayType::SizeAtCompileTime==Dynamic ? 
HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::AddCost), MulCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * int(NumTraits::MulCost) }; EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline RealScalar epsilon() { return NumTraits::epsilon(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static inline RealScalar dummy_precision() { return NumTraits::dummy_precision(); } EIGEN_CONSTEXPR static inline int digits10() { return NumTraits::digits10(); } }; template<> struct NumTraits : GenericNumTraits { enum { RequireInitialization = 1, ReadCost = HugeCost, AddCost = HugeCost, MulCost = HugeCost }; EIGEN_CONSTEXPR static inline int digits10() { return 0; } private: static inline std::string epsilon(); static inline std::string dummy_precision(); static inline std::string lowest(); static inline std::string highest(); static inline std::string infinity(); static inline std::string quiet_NaN(); }; // Empty specialization for void to allow template specialization based on NumTraits::Real with T==void and SFINAE. template<> struct NumTraits {}; template<> struct NumTraits : GenericNumTraits {}; } // end namespace Eigen #endif // EIGEN_NUMTRAITS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/PartialReduxEvaluator.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2018 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARTIALREDUX_H #define EIGEN_PARTIALREDUX_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { /*************************************************************************** * * This file provides evaluators for partial reductions. 
* There are two modes: * * - scalar path: simply calls the respective function on the column or row. * -> nothing special here, all the tricky part is handled by the return * types of VectorwiseOp's members. They embed the functor calling the * respective DenseBase's member function. * * - vectorized path: implements a packet-wise reductions followed by * some (optional) processing of the outcome, e.g., division by n for mean. * * For the vectorized path let's observe that the packet-size and outer-unrolling * are both decided by the assignment logic. So all we have to do is to decide * on the inner unrolling. * * For the unrolling, we can reuse "internal::redux_vec_unroller" from Redux.h, * but be need to be careful to specify correct increment. * ***************************************************************************/ /* logic deciding a strategy for unrolling of vectorized paths */ template struct packetwise_redux_traits { enum { OuterSize = int(Evaluator::IsRowMajor) ? Evaluator::RowsAtCompileTime : Evaluator::ColsAtCompileTime, Cost = OuterSize == Dynamic ? HugeCost : OuterSize * Evaluator::CoeffReadCost + (OuterSize-1) * functor_traits::Cost, Unrolling = Cost <= EIGEN_UNROLLING_LIMIT ? 
CompleteUnrolling : NoUnrolling }; }; /* Value to be returned when size==0 , by default let's return 0 */ template EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const Func& ) { const typename unpacket_traits::type zero(0); return pset1(zero); } /* For products the default is 1 */ template EIGEN_DEVICE_FUNC PacketType packetwise_redux_empty_value(const scalar_product_op& ) { return pset1(Scalar(1)); } /* Perform the actual reduction */ template::Unrolling > struct packetwise_redux_impl; /* Perform the actual reduction with unrolling */ template struct packetwise_redux_impl { typedef redux_novec_unroller Base; typedef typename Evaluator::Scalar Scalar; template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func& func, Index /*size*/) { return redux_vec_unroller::OuterSize>::template run(eval,func); } }; /* Add a specialization of redux_vec_unroller for size==0 at compiletime. * This specialization is not required for general reductions, which is * why it is defined here. 
*/ template struct redux_vec_unroller { template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator &, const Func& f) { return packetwise_redux_empty_value(f); } }; /* Perform the actual reduction for dynamic sizes */ template struct packetwise_redux_impl { typedef typename Evaluator::Scalar Scalar; typedef typename redux_traits::PacketType PacketScalar; template EIGEN_DEVICE_FUNC static PacketType run(const Evaluator &eval, const Func& func, Index size) { if(size==0) return packetwise_redux_empty_value(func); const Index size4 = (size-1)&(~3); PacketType p = eval.template packetByOuterInner(0,0); Index i = 1; // This loop is optimized for instruction pipelining: // - each iteration generates two independent instructions // - thanks to branch prediction and out-of-order execution we have independent instructions across loops for(; i(i+0,0),eval.template packetByOuterInner(i+1,0)), func.packetOp(eval.template packetByOuterInner(i+2,0),eval.template packetByOuterInner(i+3,0)))); for(; i(i,0)); return p; } }; template< typename ArgType, typename MemberOp, int Direction> struct evaluator > : evaluator_base > { typedef PartialReduxExpr XprType; typedef typename internal::nested_eval::type ArgTypeNested; typedef typename internal::add_const_on_value_type::type ConstArgTypeNested; typedef typename internal::remove_all::type ArgTypeNestedCleaned; typedef typename ArgType::Scalar InputScalar; typedef typename XprType::Scalar Scalar; enum { TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime) }; typedef typename MemberOp::template Cost CostOpType; enum { CoeffReadCost = TraversalSize==Dynamic ? HugeCost : TraversalSize==0 ? 1 : int(TraversalSize) * int(evaluator::CoeffReadCost) + int(CostOpType::value), _ArgFlags = evaluator::Flags, _Vectorizable = bool(int(_ArgFlags)&PacketAccessBit) && bool(MemberOp::Vectorizable) && (Direction==int(Vertical) ? 
bool(_ArgFlags&RowMajorBit) : (_ArgFlags&RowMajorBit)==0) && (TraversalSize!=0), Flags = (traits::Flags&RowMajorBit) | (evaluator::Flags&(HereditaryBits&(~RowMajorBit))) | (_Vectorizable ? PacketAccessBit : 0) | LinearAccessBit, Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) { EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : (TraversalSize==0 ? 1 : int(CostOpType::value))); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const { return coeff(Direction==Vertical ? j : i); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { return m_functor(m_arg.template subVector(index)); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packet(Index i, Index j) const { return packet(Direction==Vertical ? j : i); } template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC PacketType packet(Index idx) const { enum { PacketSize = internal::unpacket_traits::size }; typedef Block PanelType; PanelType panel(m_arg, Direction==Vertical ? 0 : idx, Direction==Vertical ? idx : 0, Direction==Vertical ? m_arg.rows() : Index(PacketSize), Direction==Vertical ? Index(PacketSize) : m_arg.cols()); // FIXME // See bug 1612, currently if PacketSize==1 (i.e. complex with 128bits registers) then the storage-order of panel get reversed // and methods like packetByOuterInner do not make sense anymore in this context. 
// So let's just by pass "vectorization" in this case: if(PacketSize==1) return internal::pset1(coeff(idx)); typedef typename internal::redux_evaluator PanelEvaluator; PanelEvaluator panel_eval(panel); typedef typename MemberOp::BinaryOp BinaryOp; PacketType p = internal::packetwise_redux_impl::template run(panel_eval,m_functor.binaryFunc(),m_arg.outerSize()); return p; } protected: ConstArgTypeNested m_arg; const MemberOp m_functor; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARTIALREDUX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/PermutationMatrix.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2009-2015 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PERMUTATIONMATRIX_H #define EIGEN_PERMUTATIONMATRIX_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { enum PermPermProduct_t {PermPermProduct}; } // end namespace internal /** \class PermutationBase * \ingroup Core_Module * * \brief Base class for permutations * * \tparam Derived the derived class * * This class is the base class for all expressions representing a permutation matrix, * internally stored as a vector of integers. * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f] * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have: * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f] * * Permutation matrices are square and invertible. 
* * Notice that in addition to the member functions and operators listed here, there also are non-member * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) * on either side. * * \sa class PermutationMatrix, class PermutationWrapper */ template class PermutationBase : public EigenBase { typedef internal::traits Traits; typedef EigenBase Base; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; enum { Flags = Traits::Flags, RowsAtCompileTime = Traits::RowsAtCompileTime, ColsAtCompileTime = Traits::ColsAtCompileTime, MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::StorageIndex StorageIndex; typedef Matrix DenseMatrixType; typedef PermutationMatrix PlainPermutationType; typedef PlainPermutationType PlainObject; using Base::derived; typedef Inverse InverseReturnType; typedef void Scalar; #endif /** Copies the other permutation into *this */ template Derived& operator=(const PermutationBase& other) { indices() = other.indices(); return derived(); } /** Assignment from the Transpositions \a tr */ template Derived& operator=(const TranspositionsBase& tr) { setIdentity(tr.size()); for(Index k=size()-1; k>=0; --k) applyTranspositionOnTheRight(k,tr.coeff(k)); return derived(); } /** \returns the number of rows */ inline EIGEN_DEVICE_FUNC Index rows() const { return Index(indices().size()); } /** \returns the number of columns */ inline EIGEN_DEVICE_FUNC Index cols() const { return Index(indices().size()); } /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ inline EIGEN_DEVICE_FUNC Index size() const { return Index(indices().size()); } #ifndef EIGEN_PARSED_BY_DOXYGEN template void evalTo(MatrixBase& other) const { other.setZero(); for (Index i=0; i=0 && j>=0 && i=0 && j>=0 && i void assignTranspose(const PermutationBase& other) { for (Index i=0; i void 
assignProduct(const Lhs& lhs, const Rhs& rhs) { eigen_assert(lhs.cols() == rhs.rows()); for (Index i=0; i inline PlainPermutationType operator*(const PermutationBase& other) const { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); } /** \returns the product of a permutation with another inverse permutation. * * \note \blank \note_try_to_help_rvo */ template inline PlainPermutationType operator*(const InverseImpl& other) const { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); } /** \returns the product of an inverse permutation with another permutation. * * \note \blank \note_try_to_help_rvo */ template friend inline PlainPermutationType operator*(const InverseImpl& other, const PermutationBase& perm) { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation. * * This function is O(\c n) procedure allocating a buffer of \c n booleans. */ Index determinant() const { Index res = 1; Index n = size(); Matrix mask(n); mask.fill(false); Index r = 0; while(r < n) { // search for the next seed while(r=n) break; // we got one, let's follow it until we are back to the seed Index k0 = r++; mask.coeffRef(k0) = true; for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k)) { mask.coeffRef(k) = true; res = -res; } } return res; } protected: }; namespace internal { template struct traits > : traits > { typedef PermutationStorage StorageKind; typedef Matrix IndicesType; typedef StorageIndex_ StorageIndex; typedef void Scalar; }; } /** \class PermutationMatrix * \ingroup Core_Module * * \brief Permutation matrix * * \tparam SizeAtCompileTime the number of rows/cols, or Dynamic * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. 
* \tparam StorageIndex_ the integer type of the indices * * This class represents a permutation matrix, internally stored as a vector of integers. * * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix */ template class PermutationMatrix : public PermutationBase > { typedef PermutationBase Base; typedef internal::traits Traits; public: typedef const PermutationMatrix& Nested; #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; typedef typename Traits::StorageIndex StorageIndex; #endif inline PermutationMatrix() {} /** Constructs an uninitialized permutation matrix of given size. */ explicit inline PermutationMatrix(Index size) : m_indices(size) { eigen_internal_assert(size <= NumTraits::highest()); } /** Copy constructor. */ template inline PermutationMatrix(const PermutationBase& other) : m_indices(other.indices()) {} /** Generic constructor from expression of the indices. The indices * array has the meaning that the permutations sends each integer i to indices[i]. * * \warning It is your responsibility to check that the indices array that you passes actually * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the * array's size. */ template explicit inline PermutationMatrix(const MatrixBase& indices) : m_indices(indices) {} /** Convert the Transpositions \a tr to a permutation matrix */ template explicit PermutationMatrix(const TranspositionsBase& tr) : m_indices(tr.size()) { *this = tr; } /** Copies the other permutation into *this */ template PermutationMatrix& operator=(const PermutationBase& other) { m_indices = other.indices(); return *this; } /** Assignment from the Transpositions \a tr */ template PermutationMatrix& operator=(const TranspositionsBase& tr) { return Base::operator=(tr.derived()); } /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the permutation. 
*/ IndicesType& indices() { return m_indices; } /**** multiplication helpers to hopefully get RVO ****/ #ifndef EIGEN_PARSED_BY_DOXYGEN template PermutationMatrix(const InverseImpl& other) : m_indices(other.derived().nestedExpression().size()) { eigen_internal_assert(m_indices.size() <= NumTraits::highest()); StorageIndex end = StorageIndex(m_indices.size()); for (StorageIndex i=0; i PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) : m_indices(lhs.indices().size()) { Base::assignProduct(lhs,rhs); } #endif protected: IndicesType m_indices; }; namespace internal { template struct traits,_PacketAccess> > : traits > { typedef PermutationStorage StorageKind; typedef Map, _PacketAccess> IndicesType; typedef StorageIndex_ StorageIndex; typedef void Scalar; }; } template class Map,_PacketAccess> : public PermutationBase,_PacketAccess> > { typedef PermutationBase Base; typedef internal::traits Traits; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar StorageIndex; #endif inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {} inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr,size) {} /** Copies the other permutation into *this */ template Map& operator=(const PermutationBase& other) { return Base::operator=(other.derived()); } /** Assignment from the Transpositions \a tr */ template Map& operator=(const TranspositionsBase& tr) { return Base::operator=(tr.derived()); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Map& operator=(const Map& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the permutation. 
*/ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; template class TranspositionsWrapper; namespace internal { template struct traits > { typedef PermutationStorage StorageKind; typedef void Scalar; typedef typename IndicesType_::Scalar StorageIndex; typedef IndicesType_ IndicesType; enum { RowsAtCompileTime = IndicesType_::SizeAtCompileTime, ColsAtCompileTime = IndicesType_::SizeAtCompileTime, MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime, MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime, Flags = 0 }; }; } /** \class PermutationWrapper * \ingroup Core_Module * * \brief Class to view a vector of integers as a permutation matrix * * \tparam IndicesType_ the type of the vector of integer (can be any compatible expression) * * This class allows to view any vector expression of integers as a permutation matrix. * * \sa class PermutationBase, class PermutationMatrix */ template class PermutationWrapper : public PermutationBase > { typedef PermutationBase Base; typedef internal::traits Traits; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; #endif inline PermutationWrapper(const IndicesType& indices) : m_indices(indices) {} /** const version of indices(). */ const typename internal::remove_all::type& indices() const { return m_indices; } protected: typename IndicesType::Nested m_indices; }; /** \returns the matrix with the permutation applied to the columns. */ template EIGEN_DEVICE_FUNC const Product operator*(const MatrixBase &matrix, const PermutationBase& permutation) { return Product (matrix.derived(), permutation.derived()); } /** \returns the matrix with the permutation applied to the rows. 
*/ template EIGEN_DEVICE_FUNC const Product operator*(const PermutationBase &permutation, const MatrixBase& matrix) { return Product (permutation.derived(), matrix.derived()); } template class InverseImpl : public EigenBase > { typedef typename PermutationType::PlainPermutationType PlainPermutationType; typedef internal::traits PermTraits; protected: InverseImpl() {} public: typedef Inverse InverseType; using EigenBase >::derived; #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename PermutationType::DenseMatrixType DenseMatrixType; enum { RowsAtCompileTime = PermTraits::RowsAtCompileTime, ColsAtCompileTime = PermTraits::ColsAtCompileTime, MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime, MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime }; #endif #ifndef EIGEN_PARSED_BY_DOXYGEN template void evalTo(MatrixBase& other) const { other.setZero(); for (Index i=0; i friend const Product operator*(const MatrixBase& matrix, const InverseType& trPerm) { return Product(matrix.derived(), trPerm.derived()); } /** \returns the matrix with the inverse permutation applied to the rows. */ template const Product operator*(const MatrixBase& matrix) const { return Product(derived(), matrix.derived()); } }; template const PermutationWrapper MatrixBase::asPermutation() const { return derived(); } namespace internal { template<> struct AssignmentKind { typedef EigenBase2EigenBase Kind; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_PERMUTATIONMATRIX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/PlainObjectBase.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSESTORAGEBASE_H #define EIGEN_DENSESTORAGEBASE_H #if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO) # define EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(Index i=0;i::quiet_NaN(); #else # undef EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct check_rows_cols_for_overflow { template EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE void run(Index, Index) { } }; template<> struct check_rows_cols_for_overflow { template EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols) { // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 // we assume Index is signed Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed bool error = (rows == 0 || cols == 0) ? false : (rows > max_index / cols); if (error) throw_std_bad_alloc(); } }; template struct conservative_resize_like_impl; template struct matrix_swap_impl; } // end namespace internal #ifdef EIGEN_PARSED_BY_DOXYGEN namespace doxygen { // This is a workaround to doxygen not being able to understand the inheritance logic // when it is hidden by the dense_xpr_base helper struct. // Moreover, doxygen fails to include members that are not documented in the declaration body of // MatrixBase if we inherits MatrixBase >, // this is why we simply inherits MatrixBase, though this does not make sense. /** This class is just a workaround for Doxygen and it does not not actually exist. */ template struct dense_xpr_base_dispatcher; /** This class is just a workaround for Doxygen and it does not not actually exist. 
*/ template struct dense_xpr_base_dispatcher > : public MatrixBase {}; /** This class is just a workaround for Doxygen and it does not not actually exist. */ template struct dense_xpr_base_dispatcher > : public ArrayBase {}; } // namespace doxygen /** \class PlainObjectBase * \ingroup Core_Module * \brief %Dense storage base class for matrices and arrays. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. * * \tparam Derived is the derived type, e.g., a Matrix or Array * * \sa \ref TopicClassHierarchy */ template class PlainObjectBase : public doxygen::dense_xpr_base_dispatcher #else template class PlainObjectBase : public internal::dense_xpr_base::type #endif { public: enum { Options = internal::traits::Options }; typedef typename internal::dense_xpr_base::type Base; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef Derived DenseType; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; typedef Eigen::Map MapType; typedef const Eigen::Map ConstMapType; typedef Eigen::Map AlignedMapType; typedef const Eigen::Map ConstAlignedMapType; template struct StridedMapType { typedef Eigen::Map type; }; template struct StridedConstMapType { typedef Eigen::Map type; }; template struct StridedAlignedMapType { typedef Eigen::Map type; }; template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; protected: DenseStorage m_storage; public: enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits::Alignment>0) }; 
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (int(Options)&RowMajor)==RowMajor), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (int(Options)&RowMajor)==0), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT(((Options & (DontAlign|RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS) EIGEN_DEVICE_FUNC Base& base() { return *static_cast(this); } EIGEN_DEVICE_FUNC const Base& base() const { return *static_cast(this); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_storage.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_storage.cols(); } /** This is an overloaded version of DenseCoeffsBase::coeff(Index,Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase::coeff(Index) const for details. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is an overloaded version of DenseCoeffsBase::coeff(Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase::coeff(Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; } /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index,Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase::coeffRef(Index,Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is an overloaded version of DenseCoeffsBase::coeffRef(Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase::coeffRef(Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; } /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index). * It is provided for convenience. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index). * It is provided for convenience. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { return m_storage.data()[index]; } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt (m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows())); } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return internal::ploadt(m_storage.data() + index); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { internal::pstoret (m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()), val); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { internal::pstoret(m_storage.data() + index, val); } /** \returns a const pointer to the data array of this matrix */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } /** \returns a pointer to the data array of this matrix */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } /** Resizes \c *this to a \a rows x \a cols matrix. * * This method is intended for dynamic-size matrices, although it is legal to call it on any * matrix as long as fixed dimensions are left unchanged. If you only want to change the number * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). * * If the current number of coefficients of \c *this exactly matches the * product \a rows * \a cols, then no memory allocation is performed and * the current values are left unchanged. In all other cases, including * shrinking, the data is reallocated and all previous values are lost. 
* * Example: \include Matrix_resize_int_int.cpp * Output: \verbinclude Matrix_resize_int_int.out * * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index rows, Index cols) { eigen_assert(EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,rows==RowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,cols==ColsAtCompileTime) && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,rows<=MaxRowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,cols<=MaxColsAtCompileTime) && rows>=0 && cols>=0 && "Invalid sizes when resizing a matrix or array."); internal::check_rows_cols_for_overflow::run(rows, cols); #ifdef EIGEN_INITIALIZE_COEFFS Index size = rows*cols; bool size_changed = size != this->size(); m_storage.resize(size, rows, cols); if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #else m_storage.resize(rows*cols, rows, cols); #endif } /** Resizes \c *this to a vector of length \a size * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. 
* * Example: \include Matrix_resize_int.cpp * Output: \verbinclude Matrix_resize_int.out * * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_DEVICE_FUNC inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); #ifdef EIGEN_INITIALIZE_COEFFS bool size_changed = size != this->size(); #endif if(RowsAtCompileTime == 1) m_storage.resize(size, 1, size); else m_storage.resize(size, size, 1); #ifdef EIGEN_INITIALIZE_COEFFS if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif } /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. * * Example: \include Matrix_resize_NoChange_int.cpp * Output: \verbinclude Matrix_resize_NoChange_int.out * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC inline void resize(NoChange_t, Index cols) { resize(rows(), cols); } /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. * * Example: \include Matrix_resize_int_NoChange.cpp * Output: \verbinclude Matrix_resize_int_NoChange.out * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC inline void resize(Index rows, NoChange_t) { resize(rows, cols()); } /** Resizes \c *this to have the same dimensions as \a other. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. 
*/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(1, othersize); } else if(ColsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(othersize, 1); } else resize(other.rows(), other.cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) { internal::conservative_resize_like_impl::run(*this, rows, cols); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of columns unchanged. * * In case the matrix is growing, new rows will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows, cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of rows unchanged. * * In case the matrix is growing, new columns will be uninitialized. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), cols); } /** Resizes the vector to \a size while retaining old values. * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. * * When values are appended, they will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index size) { internal::conservative_resize_like_impl::run(*this, size); } /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will copied from \c other. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) { internal::conservative_resize_like_impl::run(*this, other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) { return _set(other); } /** \sa MatrixBase::lazyAssign() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase& other) { _resize_to_match(other); return Base::lazyAssign(other.derived()); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue& func) { resize(func.rows(), func.cols()); return Base::operator=(func); } // Prevent user from trying to instantiate PlainObjectBase objects // by making all its constructor protected. See bug 1074. 
protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase() : m_storage() { // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ? /** \internal */ EIGEN_DEVICE_FUNC explicit PlainObjectBase(internal::constructor_without_unaligned_array_assert) : m_storage(internal::constructor_without_unaligned_array_assert()) { // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC PlainObjectBase(PlainObjectBase&& other) EIGEN_NOEXCEPT : m_storage( std::move(other.m_storage) ) { } EIGEN_DEVICE_FUNC PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT { m_storage = std::move(other.m_storage); return *this; } #endif /** Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other) : Base(), m_storage(other.m_storage) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) : m_storage(size, rows, cols) { // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #if EIGEN_HAS_CXX11 /** \brief Construct a row of column vector with fixed size from an arbitrary number of coefficients. \cpp11 * * \only_for_vectors * * This constructor is for 1D array or vectors with more than 4 coefficients. * There exists C++98 analogue constructors for fixed-size array/vector having 1, 2, 3, or 4 coefficients. * * \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this * constructor must match the the fixed number of rows (resp. columns) of \c *this. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... 
args) : m_storage() { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, sizeof...(args) + 4); m_storage.data()[0] = a0; m_storage.data()[1] = a1; m_storage.data()[2] = a2; m_storage.data()[3] = a3; Index i = 4; auto x = {(m_storage.data()[i++] = args, 0)...}; static_cast(x); } /** \brief Constructs a Matrix or Array and initializes it by elements given by an initializer list of initializer * lists \cpp11 */ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE PlainObjectBase(const std::initializer_list>& list) : m_storage() { size_t list_size = 0; if (list.begin() != list.end()) { list_size = list.begin()->size(); } // This is to allow syntax like VectorXi {{1, 2, 3, 4}} if (ColsAtCompileTime == 1 && list.size() == 1) { eigen_assert(list_size == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); resize(list_size, ColsAtCompileTime); std::copy(list.begin()->begin(), list.begin()->end(), m_storage.data()); } else { eigen_assert(list.size() == static_cast(RowsAtCompileTime) || RowsAtCompileTime == Dynamic); eigen_assert(list_size == static_cast(ColsAtCompileTime) || ColsAtCompileTime == Dynamic); resize(list.size(), list_size); Index row_index = 0; for (const std::initializer_list& row : list) { eigen_assert(list_size == row.size()); Index col_index = 0; for (const Scalar& e : row) { coeffRef(row_index, col_index) = e; ++col_index; } ++row_index; } } } #endif // end EIGEN_HAS_CXX11 /** \sa PlainObjectBase::operator=(const EigenBase&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase &other) : m_storage() { resizeLike(other); _set_noalias(other); } /** \sa PlainObjectBase::operator=(const EigenBase&) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) : m_storage() { resizeLike(other); *this = other.derived(); } /** \brief Copy constructor with in-place evaluation */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue& other) { // FIXME this does not 
automatically transpose vectors if necessary resize(other.rows(), other.cols()); other.evalTo(this->derived()); } public: /** \brief Copies the generic expression \a other into *this. * \copydetails DenseBase::operator=(const EigenBase &other) */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase &other) { _resize_to_match(other); Base::operator=(other.derived()); return this->derived(); } /** \name Map * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned * \a data pointers. * * Here is an example using strides: * \include Matrix_Map_stride.cpp * Output: \verbinclude Matrix_Map_stride.out * * \see class Map */ //@{ static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } static inline MapType Map(Scalar* data) { return MapType(data); } static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { return ConstAlignedMapType(data, size); } static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return ConstAlignedMapType(data, rows, cols); } static inline AlignedMapType MapAligned(Scalar* data, Index 
rows, Index cols) { return AlignedMapType(data, rows, cols); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) { return typename StridedConstMapType >::type(data, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, const Stride& stride) { return typename StridedMapType >::type(data, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstMapType >::type(data, size, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index size, const Stride& stride) { return typename StridedMapType >::type(data, size, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstMapType >::type(data, rows, cols, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedMapType >::type(data, rows, cols, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) { return typename StridedAlignedMapType >::type(data, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, size, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) { return typename StridedAlignedMapType >::type(data, size, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, 
Index rows, Index cols, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } //@} using Base::setConstant; EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val); EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val); EIGEN_DEVICE_FUNC Derived& setConstant(NoChange_t, Index cols, const Scalar& val); EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, NoChange_t, const Scalar& val); using Base::setZero; EIGEN_DEVICE_FUNC Derived& setZero(Index size); EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols); EIGEN_DEVICE_FUNC Derived& setZero(NoChange_t, Index cols); EIGEN_DEVICE_FUNC Derived& setZero(Index rows, NoChange_t); using Base::setOnes; EIGEN_DEVICE_FUNC Derived& setOnes(Index size); EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols); EIGEN_DEVICE_FUNC Derived& setOnes(NoChange_t, Index cols); EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, NoChange_t); using Base::setRandom; Derived& setRandom(Index size); Derived& setRandom(Index rows, Index cols); Derived& setRandom(NoChange_t, Index cols); Derived& setRandom(Index rows, NoChange_t); #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN #include EIGEN_PLAINOBJECTBASE_PLUGIN #endif protected: /** \internal Resizes *this in preparation for assigning \a other to it. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase& other) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? 
(this->size() == other.size()) : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); EIGEN_ONLY_USED_FOR_DEBUG(other); #else resizeLike(other); #endif } /** * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. * * \sa operator=(const MatrixBase&), _set_noalias() * * \internal */ // aliasing is dealt once in internal::call_assignment // so at this stage we have to assume aliasing... and resising has to be done later. template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) { internal::call_assignment(this->derived(), other.derived()); return this->derived(); } /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. * * \sa operator=(const MatrixBase&), _set() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase& other) { // I don't think we need this resize call since the lazyAssign will anyways resize // and lazyAssign will be called by the assign selector. //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. 
internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op()); return this->derived(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if::type* = 0) { const bool t0_is_integer_alike = internal::is_valid_index_type::value; const bool t1_is_integer_alike = internal::is_valid_index_type::value; EIGEN_STATIC_ASSERT(t0_is_integer_alike && t1_is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(rows,cols); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = Scalar(val0); m_storage.data()[1] = Scalar(val1); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1, typename internal::enable_if< (!internal::is_same::value) && (internal::is_same::value) && (internal::is_same::value) && Base::SizeAtCompileTime==2,T1>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = Scalar(val0); m_storage.data()[1] = Scalar(val1); } // The argument is convertible to the Index type and we either have a non 1x1 Matrix, or a dynamic-sized Array, // then the argument is meant to be the size of the object. template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(Index size, typename internal::enable_if< (Base::SizeAtCompileTime!=1 || !internal::is_convertible::value) && ((!internal::is_same::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0) { // NOTE MSVC 2008 complains if we directly put bool(NumTraits::IsInteger) as the EIGEN_STATIC_ASSERT argument. 
const bool is_integer_alike = internal::is_valid_index_type::value; EIGEN_UNUSED_VARIABLE(is_integer_alike); EIGEN_STATIC_ASSERT(is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(size); } // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitly converted) template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if::value,T>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) m_storage.data()[0] = val0; } // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type match the index type) template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Index& val0, typename internal::enable_if< (!internal::is_same::value) && (internal::is_same::value) && Base::SizeAtCompileTime==1 && internal::is_convertible::value,T*>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) m_storage.data()[0] = Scalar(val0); } // Initialize a fixed size matrix from a pointer to raw data template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar* data){ this->_set_noalias(ConstMapType(data)); } // Initialize an arbitrary matrix from a dense expression template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const DenseBase& other){ this->_set_noalias(other); } // Initialize an arbitrary matrix from an object convertible to the Derived type. 
template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Derived& other){ this->_set_noalias(other); } // Initialize an arbitrary matrix from a generic Eigen expression template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const EigenBase& other){ this->derived() = other; } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const ReturnByValue& other) { resize(other.rows(), other.cols()); other.evalTo(this->derived()); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const RotationBase& r) { this->derived() = r; } // For fixed-size Array template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if< Base::SizeAtCompileTime!=Dynamic && Base::SizeAtCompileTime!=1 && internal::is_convertible::value && internal::is_same::XprKind,ArrayXpr>::value,T>::type* = 0) { Base::setConstant(val0); } // For fixed-size Array template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Index& val0, typename internal::enable_if< (!internal::is_same::value) && (internal::is_same::value) && Base::SizeAtCompileTime!=Dynamic && Base::SizeAtCompileTime!=1 && internal::is_convertible::value && internal::is_same::XprKind,ArrayXpr>::value,T*>::type* = 0) { Base::setConstant(val0); } template friend struct internal::matrix_swap_impl; public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal * \brief Override DenseBase::swap() since for dynamic-sized matrices * of same type it is enough to swap the data pointers. 
*/ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase & other) { enum { SwapPointers = internal::is_same::value && Base::SizeAtCompileTime==Dynamic }; internal::matrix_swap_impl::run(this->derived(), other.derived()); } /** \internal * \brief const version forwarded to DenseBase::swap */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(DenseBase const & other) { Base::swap(other.derived()); } enum { IsPlainObjectBase = 1 }; #endif public: // These apparently need to be down here for nvcc+icc to prevent duplicate // Map symbol. template friend class Eigen::Map; friend class Eigen::Map; friend class Eigen::Map; #if EIGEN_MAX_ALIGN_BYTES>0 // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice. friend class Eigen::Map; friend class Eigen::Map; #endif }; namespace internal { template struct conservative_resize_like_impl { #if EIGEN_HAS_TYPE_TRAITS static const bool IsRelocatable = std::is_trivially_copyable::value; #else static const bool IsRelocatable = !NumTraits::RequireInitialization; #endif static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) if ( IsRelocatable && (( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == rows) )) // column-major and we change only the number of columns { internal::check_rows_cols_for_overflow::run(rows, cols); _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); } else { // The storage order does not allow us to use reallocation. 
Derived tmp(rows,cols); const Index common_rows = numext::mini(rows, _this.rows()); const Index common_cols = numext::mini(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) if ( IsRelocatable && (( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == other.rows()) )) // column-major and we change only the number of columns { const Index new_rows = other.rows() - _this.rows(); const Index new_cols = other.cols() - _this.cols(); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); if (new_rows>0) _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); else if (new_cols>0) _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); } else { // The storage order does not allow us to use reallocation. 
Derived tmp(other); const Index common_rows = numext::mini(tmp.rows(), _this.rows()); const Index common_cols = numext::mini(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } }; // Here, the specialization for vectors inherits from the general matrix case // to allow calling .conservativeResize(rows,cols) on vectors. template struct conservative_resize_like_impl : conservative_resize_like_impl { typedef conservative_resize_like_impl Base; using Base::run; using Base::IsRelocatable; static void run(DenseBase& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; if(IsRelocatable) _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); else Base::run(_this.derived(), new_rows, new_cols); } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; const Index num_new_elements = other.size() - _this.size(); const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); const Index new_cols = Derived::RowsAtCompileTime==1 ? 
other.cols() : 1; if(IsRelocatable) _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); else Base::run(_this.derived(), new_rows, new_cols); if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements); } }; template struct matrix_swap_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); } }; template struct matrix_swap_impl { EIGEN_DEVICE_FUNC static inline void run(MatrixTypeA& a, MatrixTypeB& b) { static_cast(a).m_storage.swap(static_cast(b).m_storage); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSESTORAGEBASE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Product.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_PRODUCT_H #define EIGEN_PRODUCT_H #include "./InternalHeaderCheck.h" namespace Eigen { template class ProductImpl; namespace internal { template struct traits > { typedef typename remove_all::type LhsCleaned; typedef typename remove_all::type RhsCleaned; typedef traits LhsTraits; typedef traits RhsTraits; typedef MatrixXpr XprKind; typedef typename ScalarBinaryOpTraits::Scalar, typename traits::Scalar>::ReturnType Scalar; typedef typename product_promote_storage_type::ret>::ret StorageKind; typedef typename promote_index_type::type StorageIndex; enum { RowsAtCompileTime = LhsTraits::RowsAtCompileTime, ColsAtCompileTime = RhsTraits::ColsAtCompileTime, MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime, MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime, // FIXME: only needed by GeneralMatrixMatrixTriangular InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime), // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator. Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 : ( ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit)) || ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) ) ? RowMajorBit : NoPreferredStorageOrderBit }; }; } // end namespace internal /** \class Product * \ingroup Core_Module * * \brief Expression of the product of two arbitrary matrices or vectors * * \tparam Lhs_ the type of the left-hand side expression * \tparam Rhs_ the type of the right-hand side expression * * This class represents an expression of the product of two arbitrary matrices. 
* * The other template parameters are: * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct * */ template class Product : public ProductImpl::StorageKind, typename internal::traits::StorageKind, internal::product_type::ret>::ret> { public: typedef Lhs_ Lhs; typedef Rhs_ Rhs; typedef typename ProductImpl< Lhs, Rhs, Option, typename internal::product_promote_storage_type::StorageKind, typename internal::traits::StorageKind, internal::product_type::ret>::ret>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Product) typedef typename internal::ref_selector::type LhsNested; typedef typename internal::ref_selector::type RhsNested; typedef typename internal::remove_all::type LhsNestedCleaned; typedef typename internal::remove_all::type RhsNestedCleaned; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned& lhs() const { return m_lhs; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; namespace internal { template::ret> class dense_product_base : public internal::dense_xpr_base >::type {}; /** Conversion to scalar for inner-products */ template class dense_product_base : public internal::dense_xpr_base >::type { typedef Product ProductXpr; typedef typename internal::dense_xpr_base::type Base; public: using Base::derived; typedef typename Base::Scalar Scalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const { return internal::evaluator(derived()).coeff(0,0); } }; } 
// namespace internal // Generic API dispatcher template class ProductImpl : public internal::generic_xpr_base, MatrixXpr, StorageKind>::type { public: typedef typename internal::generic_xpr_base, MatrixXpr, StorageKind>::type Base; }; template class ProductImpl : public internal::dense_product_base { typedef Product Derived; public: typedef typename internal::dense_product_base Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) protected: enum { IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) && (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic), EnableCoeff = IsOneByOne || Option==LazyProduct }; public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const { EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) ); return internal::evaluator(derived()).coeff(row,col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const { EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) ); return internal::evaluator(derived()).coeff(i); } }; } // end namespace Eigen #endif // EIGEN_PRODUCT_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ProductEvaluators.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2011 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_PRODUCTEVALUATORS_H #define EIGEN_PRODUCTEVALUATORS_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { /** \internal * Evaluator of a product expression. * Since products require special treatments to handle all possible cases, * we simply defer the evaluation logic to a product_evaluator class * which offers more partial specialization possibilities. * * \sa class product_evaluator */ template struct evaluator > : public product_evaluator > { typedef Product XprType; typedef product_evaluator Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} }; // Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B" // TODO we should apply that rule only if that's really helpful template struct evaluator_assume_aliasing, const CwiseNullaryOp, Plain1>, const Product > > { static const bool value = true; }; template struct evaluator, const CwiseNullaryOp, Plain1>, const Product > > : public evaluator > { typedef CwiseBinaryOp, const CwiseNullaryOp, Plain1>, const Product > XprType; typedef evaluator > Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs()) {} }; template struct evaluator, DiagIndex> > : public evaluator, DiagIndex> > { typedef Diagonal, DiagIndex> XprType; typedef evaluator, DiagIndex> > Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(Diagonal, DiagIndex>( Product(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()), xpr.index() )) {} }; // Helper class to perform a matrix product with the destination at hand. // Depending on the sizes of the factors, there are different evaluation strategies // as controlled by internal::product_type. 
template< typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits::Shape, typename RhsShape = typename evaluator_traits::Shape, int ProductType = internal::product_type::value> struct generic_product_impl; template struct evaluator_assume_aliasing > { static const bool value = true; }; // This is the default evaluator implementation for products: // It creates a temporary and call generic_product_impl template struct product_evaluator, ProductTag, LhsShape, RhsShape> : public evaluator::PlainObject> { typedef Product XprType; typedef typename XprType::PlainObject PlainObject; typedef evaluator Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) { ::new (static_cast(this)) Base(m_result); // FIXME shall we handle nested_eval here?, // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.) // typedef typename internal::nested_eval::type LhsNested; // typedef typename internal::nested_eval::type RhsNested; // typedef typename internal::remove_all::type LhsNestedCleaned; // typedef typename internal::remove_all::type RhsNestedCleaned; // // const LhsNested lhs(xpr.lhs()); // const RhsNested rhs(xpr.rhs()); // // generic_product_impl::evalTo(m_result, lhs, rhs); generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); } protected: PlainObject m_result; }; // The following three shortcuts are enabled only if the scalar types match exactly. // TODO: we could enable them for different scalar types when the product is not vectorized. 
// Dense = Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment, internal::assign_op, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product SrcXprType; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); // FIXME shall we handle nested_eval here? generic_product_impl::evalTo(dst, src.lhs(), src.rhs()); } }; // Dense += Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment, internal::add_assign_op, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product SrcXprType; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); // FIXME shall we handle nested_eval here? generic_product_impl::addTo(dst, src.lhs(), src.rhs()); } }; // Dense -= Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment, internal::sub_assign_op, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product SrcXprType; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); // FIXME shall we handle nested_eval here? 
generic_product_impl::subTo(dst, src.lhs(), src.rhs()); } }; // Dense ?= scalar * Product // TODO we should apply that rule if that's really helpful // for instance, this is not good for inner products template< typename DstXprType, typename Lhs, typename Rhs, typename AssignFunc, typename Scalar, typename ScalarBis, typename Plain> struct Assignment, const CwiseNullaryOp,Plain>, const Product >, AssignFunc, Dense2Dense> { typedef CwiseBinaryOp, const CwiseNullaryOp,Plain>, const Product > SrcXprType; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func) { call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func); } }; //---------------------------------------- // Catch "Dense ?= xpr + Product<>" expression to save one temporary // FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct template struct evaluator_assume_aliasing::Scalar>, const OtherXpr, const Product >, DenseShape > { static const bool value = true; }; template struct evaluator_assume_aliasing::Scalar>, const OtherXpr, const Product >, DenseShape > { static const bool value = true; }; template struct assignment_from_xpr_op_product { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/) { call_assignment_no_alias(dst, src.lhs(), Func1()); call_assignment_no_alias(dst, src.rhs(), Func2()); } }; #define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP,BINOP,ASSIGN_OP2) \ template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> \ struct Assignment, const OtherXpr, \ const Product >, internal::ASSIGN_OP, Dense2Dense> \ : assignment_from_xpr_op_product, internal::ASSIGN_OP, internal::ASSIGN_OP2 > \ {} EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_sum_op,add_assign_op); 
EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_sum_op,add_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_sum_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_difference_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_difference_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_difference_op,add_assign_op); //---------------------------------------- template struct generic_product_impl { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) += (lhs.transpose().cwiseProduct(rhs)).sum(); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); } }; /*********************************************************************** * Implementation of outer dense * dense vector product ***********************************************************************/ // Column major result template void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&) { evaluator rhsEval(rhs); ei_declare_local_nested_eval(Lhs,lhs,Rhs::SizeAtCompileTime,actual_lhs); // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too const Index cols = dst.cols(); for (Index j=0; j void EIGEN_DEVICE_FUNC outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) { evaluator lhsEval(lhs); ei_declare_local_nested_eval(Rhs,rhs,Lhs::SizeAtCompileTime,actual_rhs); // FIXME if rows is large enough, then it might be useful to 
make sure that rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too const Index rows = dst.rows(); for (Index i=0; i struct generic_product_impl { template struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; typedef typename Product::Scalar Scalar; // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose struct set { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; struct add { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; struct sub { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } }; struct adds { Scalar m_scale; explicit adds(const Scalar& s) : m_scale(s) {} template void EIGEN_DEVICE_FUNC operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += m_scale * src; } }; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major()); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major()); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major()); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major()); } }; // This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo template struct 
generic_product_impl_base { typedef typename Product::Scalar Scalar; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); } }; template struct generic_product_impl : generic_product_impl_base > { typedef typename nested_eval::type LhsNested; typedef typename nested_eval::type RhsNested; typedef typename Product::Scalar Scalar; enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; typedef typename internal::remove_all::type>::type MatrixType; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { // Fallback to inner product if both the lhs and rhs is a runtime vector. 
if (lhs.rows() == 1 && rhs.cols() == 1) { dst.coeffRef(0,0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0)); return; } LhsNested actual_lhs(lhs); RhsNested actual_rhs(rhs); internal::gemv_dense_selector::HasUsableDirectAccess) >::run(actual_lhs, actual_rhs, dst, alpha); } }; template struct generic_product_impl { typedef typename Product::Scalar Scalar; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // Same as: dst.noalias() = lhs.lazyProduct(rhs); // but easier on the compiler side call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op()); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // dst.noalias() += lhs.lazyProduct(rhs); call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op()); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // dst.noalias() -= lhs.lazyProduct(rhs); call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op()); } // This is a special evaluation path called from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h // This variant tries to extract scalar multiples from both the LHS and RHS and factor them out. For instance: // dst {,+,-}= (s1*A)*(B*s2) // will be rewritten as: // dst {,+,-}= (s1*s2) * (A.lazyProduct(B)) // There are at least four benefits of doing so: // 1 - huge performance gain for heap-allocated matrix types as it save costly allocations. // 2 - it is faster than simply by-passing the heap allocation through stack allocation. // 3 - it makes this fallback consistent with the heavy GEMM routine. // 4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices. 
// (see https://stackoverflow.com/questions/54738495) // For small fixed sizes matrices, however, the gains are less obvious, it is sometimes x2 faster, but sometimes x3 slower, // and the behavior depends also a lot on the compiler... This is why this re-writing strategy is currently // enabled only when falling back from the main GEMM. template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Func &func) { enum { HasScalarFactor = blas_traits::HasScalarFactor || blas_traits::HasScalarFactor, ConjLhs = blas_traits::NeedToConjugate, ConjRhs = blas_traits::NeedToConjugate }; // FIXME: in c++11 this should be auto, and extractScalarFactor should also return auto // this is important for real*complex_mat Scalar actualAlpha = combine_scalar_factors(lhs, rhs); eval_dynamic_impl(dst, blas_traits::extract(lhs).template conjugateIf(), blas_traits::extract(rhs).template conjugateIf(), func, actualAlpha, typename conditional::type()); } protected: template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s /* == 1 */, false_type) { EIGEN_UNUSED_VARIABLE(s); eigen_internal_assert(s==Scalar(1)); call_restricted_packet_assignment_no_alias(dst, lhs.lazyProduct(rhs), func); } template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic_impl(Dst& dst, const LhsT& lhs, const RhsT& rhs, const Func &func, const Scalar& s, true_type) { call_restricted_packet_assignment_no_alias(dst, s * lhs.lazyProduct(rhs), func); } }; // This specialization enforces the use of a coefficient-based evaluation strategy template struct generic_product_impl : generic_product_impl {}; // Case 2: Evaluate coeff by coeff // // This is mostly taken from CoeffBasedProduct.h // The main difference is that we add an extra argument to the etor_product_*_impl::run() function // for the inner dimension of the product, because evaluator object do not 
know their size. template struct etor_product_coeff_impl; template struct etor_product_packet_impl; template struct product_evaluator, ProductTag, DenseShape, DenseShape> : evaluator_base > { typedef Product XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) : m_lhs(xpr.lhs()), m_rhs(xpr.rhs()), m_lhsImpl(m_lhs), // FIXME the creation of the evaluator objects should result in a no-op, but check that! m_rhsImpl(m_rhs), // Moreover, they are only useful for the packet path, so we could completely disable them when not needed, // or perhaps declare them on the fly on the packet method... We have experiment to check what's best. m_innerDim(xpr.lhs().cols()) { EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::AddCost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); #if 0 std::cerr << "LhsOuterStrideBytes= " << LhsOuterStrideBytes << "\n"; std::cerr << "RhsOuterStrideBytes= " << RhsOuterStrideBytes << "\n"; std::cerr << "LhsAlignment= " << LhsAlignment << "\n"; std::cerr << "RhsAlignment= " << RhsAlignment << "\n"; std::cerr << "CanVectorizeLhs= " << CanVectorizeLhs << "\n"; std::cerr << "CanVectorizeRhs= " << CanVectorizeRhs << "\n"; std::cerr << "CanVectorizeInner= " << CanVectorizeInner << "\n"; std::cerr << "EvalToRowMajor= " << EvalToRowMajor << "\n"; std::cerr << "Alignment= " << Alignment << "\n"; std::cerr << "Flags= " << Flags << "\n"; #endif } // Everything below here is taken from CoeffBasedProduct.h typedef typename internal::nested_eval::type LhsNested; typedef typename internal::nested_eval::type RhsNested; typedef typename internal::remove_all::type LhsNestedCleaned; typedef typename internal::remove_all::type RhsNestedCleaned; typedef evaluator LhsEtorType; typedef evaluator RhsEtorType; enum { RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime, ColsAtCompileTime 
= RhsNestedCleaned::ColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime), MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime, MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime }; typedef typename find_best_packet::type LhsVecPacketType; typedef typename find_best_packet::type RhsVecPacketType; enum { LhsCoeffReadCost = LhsEtorType::CoeffReadCost, RhsCoeffReadCost = RhsEtorType::CoeffReadCost, CoeffReadCost = InnerSize==0 ? NumTraits::ReadCost : InnerSize == Dynamic ? HugeCost : InnerSize * (NumTraits::MulCost + int(LhsCoeffReadCost) + int(RhsCoeffReadCost)) + (InnerSize - 1) * NumTraits::AddCost, Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, LhsFlags = LhsEtorType::Flags, RhsFlags = RhsEtorType::Flags, LhsRowMajor = LhsFlags & RowMajorBit, RhsRowMajor = RhsFlags & RowMajorBit, LhsVecPacketSize = unpacket_traits::size, RhsVecPacketSize = unpacket_traits::size, // Here, we don't care about alignment larger than the usable packet size. LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))), RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))), SameType = is_same::value, CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1), CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime!=1), EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : (bool(RhsRowMajor) && !CanVectorizeLhs), Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & ~RowMajorBit) | (EvalToRowMajor ? RowMajorBit : 0) // TODO enable vectorization for mixed types | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) | (XprType::IsVectorAtCompileTime ? 
LinearAccessBit : 0), LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)), RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)), Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,LhsAlignment))!=0 ? 0 : LhsAlignment) : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,RhsAlignment))!=0 ? 0 : RhsAlignment) : 0, /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. */ CanVectorizeInner = SameType && LhsRowMajor && (!RhsRowMajor) && (int(LhsFlags) & int(RhsFlags) & ActualPacketAccessBit) && (int(InnerSize) % packet_traits::size == 0) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const { return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); } /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, * which is why we don't set the LinearAccessBit. * TODO: this seems possible when the result is a vector */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index index) const { const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 
index : 0; return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index row, Index col) const { PacketType res; typedef etor_product_packet_impl PacketImpl; PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); return res; } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packet(Index index) const { const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0; return packet(row,col); } protected: typename internal::add_const_on_value_type::type m_lhs; typename internal::add_const_on_value_type::type m_rhs; LhsEtorType m_lhsImpl; RhsEtorType m_rhsImpl; // TODO: Get rid of m_innerDim if known at compile time Index m_innerDim; }; template struct product_evaluator, LazyCoeffBasedProductMode, DenseShape, DenseShape> : product_evaluator, CoeffBasedProductMode, DenseShape, DenseShape> { typedef Product XprType; typedef Product BaseProduct; typedef product_evaluator Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) : Base(BaseProduct(xpr.lhs(),xpr.rhs())) {} }; /**************************************** *** Coeff based product, Packet path *** ****************************************/ template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); res = pmadd(pset1(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet(Index(UnrollingIndex-1), col), res); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, 
col, lhs, rhs, innerDim, res); res = pmadd(lhs.template packet(row, Index(UnrollingIndex-1)), pset1(rhs.coeff(Index(UnrollingIndex-1), col)), res); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1(lhs.coeff(row, Index(0))),rhs.template packet(Index(0), col)); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet(row, Index(0)), pset1(rhs.coeff(Index(0), col))); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1(typename unpacket_traits::type(0)); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1(typename unpacket_traits::type(0)); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1(typename unpacket_traits::type(0)); for(Index i = 0; i < innerDim; ++i) res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); } }; template struct etor_product_packet_impl { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1(typename unpacket_traits::type(0)); for(Index i = 0; i < innerDim; ++i) res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); } }; /*************************************************************************** * Triangular products 
***************************************************************************/ template struct triangular_product_impl; template struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { triangular_product_impl ::run(dst, lhs.nestedExpression(), rhs, alpha); } }; template struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { triangular_product_impl::run(dst, lhs, rhs.nestedExpression(), alpha); } }; /*************************************************************************** * SelfAdjoint products ***************************************************************************/ template struct selfadjoint_product_impl; template struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; template static EIGEN_DEVICE_FUNC void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { selfadjoint_product_impl::run(dst, lhs.nestedExpression(), rhs, alpha); } }; template struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { selfadjoint_product_impl::run(dst, lhs, rhs.nestedExpression(), alpha); } }; /*************************************************************************** * Diagonal products ***************************************************************************/ template struct diagonal_product_evaluator_base : evaluator_base { typedef typename ScalarBinaryOpTraits::ReturnType Scalar; public: enum { CoeffReadCost = int(NumTraits::MulCost) + int(evaluator::CoeffReadCost) + int(evaluator::CoeffReadCost), MatrixFlags = evaluator::Flags, DiagFlags = evaluator::Flags, 
StorageOrder_ = (Derived::MaxRowsAtCompileTime==1 && Derived::MaxColsAtCompileTime!=1) ? RowMajor : (Derived::MaxColsAtCompileTime==1 && Derived::MaxRowsAtCompileTime!=1) ? ColMajor : MatrixFlags & RowMajorBit ? RowMajor : ColMajor, _SameStorageOrder = StorageOrder_ == (MatrixFlags & RowMajorBit ? RowMajor : ColMajor), _ScalarAccessOnDiag = !((int(StorageOrder_) == ColMajor && int(ProductOrder) == OnTheLeft) ||(int(StorageOrder_) == RowMajor && int(ProductOrder) == OnTheRight)), _SameTypes = is_same::value, // FIXME currently we need same types, but in the future the next rule should be the one //_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagFlags)&PacketAccessBit))), _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && _SameTypes && (_SameStorageOrder || (MatrixFlags&LinearAccessBit)==LinearAccessBit) && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))), _LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0, Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? 
PacketAccessBit : 0), Alignment = evaluator::Alignment, AsScalarProduct = (DiagonalType::SizeAtCompileTime==1) || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft) || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight) }; EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag) : m_diagImpl(diag), m_matImpl(mat) { EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const { if(AsScalarProduct) return m_diagImpl.coeff(0) * m_matImpl.coeff(idx); else return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx); } protected: template EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const { return internal::pmul(m_matImpl.template packet(row, col), internal::pset1(m_diagImpl.coeff(id))); } template EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const { enum { InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, DiagonalPacketLoadMode = EIGEN_PLAIN_ENUM_MIN(LoadMode,((InnerSize%16) == 0) ? int(Aligned16) : int(evaluator::Alignment)) // FIXME hardcoded 16!! 
}; return internal::pmul(m_matImpl.template packet(row, col), m_diagImpl.template packet(id)); } evaluator m_diagImpl; evaluator m_matImpl; }; // diagonal * dense template struct product_evaluator, ProductTag, DiagonalShape, DenseShape> : diagonal_product_evaluator_base, OnTheLeft> { typedef diagonal_product_evaluator_base, OnTheLeft> Base; using Base::m_diagImpl; using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; typedef Product XprType; typedef typename XprType::PlainObject PlainObject; typedef typename Lhs::DiagonalVectorType DiagonalType; enum { StorageOrder = Base::StorageOrder_ }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col); } #ifndef EIGEN_GPUCC template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case. // See also similar calls below. 
return this->template packet_impl(row,col, row, typename internal::conditional::type()); } template EIGEN_STRONG_INLINE PacketType packet(Index idx) const { return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } #endif }; // dense * diagonal template struct product_evaluator, ProductTag, DenseShape, DiagonalShape> : diagonal_product_evaluator_base, OnTheRight> { typedef diagonal_product_evaluator_base, OnTheRight> Base; using Base::m_diagImpl; using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; typedef Product XprType; typedef typename XprType::PlainObject PlainObject; enum { StorageOrder = Base::StorageOrder_ }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col); } #ifndef EIGEN_GPUCC template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return this->template packet_impl(row,col, col, typename internal::conditional::type()); } template EIGEN_STRONG_INLINE PacketType packet(Index idx) const { return packet(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } #endif }; /*************************************************************************** * Products with permutation matrices ***************************************************************************/ /** \internal * \class permutation_matrix_product * Internal helper class implementing the product between a permutation matrix and a matrix. 
* This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h */ template struct permutation_matrix_product; template struct permutation_matrix_product { typedef typename nested_eval::type MatrixType; typedef typename remove_all::type MatrixTypeCleaned; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) { MatrixType mat(xpr); const Index n = Side==OnTheLeft ? mat.rows() : mat.cols(); // FIXME we need an is_same for expression that is not sensitive to constness. For instance // is_same_xpr, Block >::value should be true. //if(is_same::value && extract_data(dst) == extract_data(mat)) if(is_same_dense(dst, mat)) { // apply the permutation inplace Matrix mask(perm.size()); mask.fill(false); Index r = 0; while(r < perm.size()) { // search for the next seed while(r=perm.size()) break; // we got one, let's follow it until we are back to the seed Index k0 = r++; Index kPrev = k0; mask.coeffRef(k0) = true; for(Index k=perm.indices().coeff(k0); k!=k0; k=perm.indices().coeff(k)) { Block(dst, k) .swap(Block (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev)); mask.coeffRef(k) = true; kPrev = k; } } } else { for(Index i = 0; i < n; ++i) { Block (dst, ((Side==OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i) = Block (mat, ((Side==OnTheRight) ^ Transposed) ? 
perm.indices().coeff(i) : i); } } } }; template struct generic_product_impl { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, lhs, rhs); } }; template struct generic_product_impl { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, rhs, lhs); } }; template struct generic_product_impl, Rhs, PermutationShape, MatrixShape, ProductTag> { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Inverse& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, lhs.nestedExpression(), rhs); } }; template struct generic_product_impl, MatrixShape, PermutationShape, ProductTag> { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Inverse& rhs) { permutation_matrix_product::run(dst, rhs.nestedExpression(), lhs); } }; /*************************************************************************** * Products with transpositions matrices ***************************************************************************/ // FIXME could we unify Transpositions and Permutation into a single "shape"?? /** \internal * \class transposition_matrix_product * Internal helper class implementing the product between a permutation matrix and a matrix. 
*/ template struct transposition_matrix_product { typedef typename nested_eval::type MatrixType; typedef typename remove_all::type MatrixTypeCleaned; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr) { MatrixType mat(xpr); typedef typename TranspositionType::StorageIndex StorageIndex; const Index size = tr.size(); StorageIndex j = 0; if(!is_same_dense(dst,mat)) dst = mat; for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k struct generic_product_impl { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, lhs, rhs); } }; template struct generic_product_impl { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, rhs, lhs); } }; template struct generic_product_impl, Rhs, TranspositionsShape, MatrixShape, ProductTag> { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Transpose& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, lhs.nestedExpression(), rhs); } }; template struct generic_product_impl, MatrixShape, TranspositionsShape, ProductTag> { template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Transpose& rhs) { transposition_matrix_product::run(dst, rhs.nestedExpression(), lhs); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_PRODUCT_EVALUATORS_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Random.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_RANDOM_H #define EIGEN_RANDOM_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct scalar_random_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op) inline const Scalar operator() () const { return random(); } }; template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false, IsRepeatable = false }; }; } // end namespace internal /** \returns a random matrix expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * \not_reentrant * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used * instead. * * * Example: \include MatrixBase_random_int_int.cpp * Output: \verbinclude MatrixBase_random_int_int.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators. * * \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random() */ template inline const typename DenseBase::RandomReturnType DenseBase::Random(Index rows, Index cols) { return NullaryExpr(rows, cols, internal::scalar_random_op()); } /** \returns a random vector expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. 
* * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * \not_reentrant * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Random() should be used * instead. * * Example: \include MatrixBase_random_int.cpp * Output: \verbinclude MatrixBase_random_int.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary vector whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random() */ template inline const typename DenseBase::RandomReturnType DenseBase::Random(Index size) { return NullaryExpr(size, internal::scalar_random_op()); } /** \returns a fixed-size random matrix or vector expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_random.cpp * Output: \verbinclude MatrixBase_random.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * \not_reentrant * * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index) */ template inline const typename DenseBase::RandomReturnType DenseBase::Random() { return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op()); } /** Sets all coefficients in this expression to random values. 
* * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * Example: \include MatrixBase_setRandom.cpp * Output: \verbinclude MatrixBase_setRandom.out * * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index) */ template EIGEN_DEVICE_FUNC inline Derived& DenseBase::setRandom() { return *this = Random(rows(), cols()); } /** Resizes to the given \a newSize, and sets all coefficients in this expression to random values. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \only_for_vectors * \not_reentrant * * Example: \include Matrix_setRandom_int.cpp * Output: \verbinclude Matrix_setRandom_int.out * * \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index newSize) { resize(newSize); return setRandom(); } /** Resizes to the given size, and sets all coefficients in this expression to random values. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setRandom_int_int.cpp * Output: \verbinclude Matrix_setRandom_int_int.out * * \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index rows, Index cols) { resize(rows, cols); return setRandom(); } /** Resizes to the given size, changing only the number of columns, and sets all * coefficients in this expression to random values. For the parameter of type * NoChange_t, just pass the special value \c NoChange. 
* * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * \sa DenseBase::setRandom(), setRandom(Index), setRandom(Index, NoChange_t), class CwiseNullaryOp, DenseBase::Random() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(NoChange_t, Index cols) { return setRandom(rows(), cols); } /** Resizes to the given size, changing only the number of rows, and sets all * coefficients in this expression to random values. For the parameter of type * NoChange_t, just pass the special value \c NoChange. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * \sa DenseBase::setRandom(), setRandom(Index), setRandom(NoChange_t, Index), class CwiseNullaryOp, DenseBase::Random() */ template EIGEN_STRONG_INLINE Derived& PlainObjectBase::setRandom(Index rows, NoChange_t) { return setRandom(rows, cols()); } } // end namespace Eigen #endif // EIGEN_RANDOM_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Redux.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_REDUX_H #define EIGEN_REDUX_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { // TODO // * implement other kind of vectorization // * factorize code /*************************************************************************** * Part 1 : the logic deciding a strategy for vectorization and unrolling ***************************************************************************/ template struct redux_traits { public: typedef typename find_best_packet::type PacketType; enum { PacketSize = unpacket_traits::size, InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime, OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime, SliceVectorizedWork = int(InnerMaxSize)==Dynamic ? Dynamic : int(OuterMaxSize)==Dynamic ? (int(InnerMaxSize)>=int(PacketSize) ? Dynamic : 0) : (int(InnerMaxSize)/int(PacketSize)) * int(OuterMaxSize) }; enum { MightVectorize = (int(Evaluator::Flags)&ActualPacketAccessBit) && (functor_traits::PacketAccess), MayLinearVectorize = bool(MightVectorize) && (int(Evaluator::Flags)&LinearAccessBit), MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork)==Dynamic || int(SliceVectorizedWork)>=3) }; public: enum { Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal) : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) : int(DefaultTraversal) }; public: enum { Cost = Evaluator::SizeAtCompileTime == Dynamic ? HugeCost : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) + (Evaluator::SizeAtCompileTime-1) * functor_traits::Cost, UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize)) }; public: enum { Unrolling = Cost <= UnrollingLimit ? 
CompleteUnrolling : NoUnrolling }; #ifdef EIGEN_DEBUG_ASSIGN static void debug() { std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl; std::cerr.setf(std::ios::hex, std::ios::basefield); EIGEN_DEBUG_VAR(Evaluator::Flags) std::cerr.unsetf(std::ios::hex); EIGEN_DEBUG_VAR(InnerMaxSize) EIGEN_DEBUG_VAR(OuterMaxSize) EIGEN_DEBUG_VAR(SliceVectorizedWork) EIGEN_DEBUG_VAR(PacketSize) EIGEN_DEBUG_VAR(MightVectorize) EIGEN_DEBUG_VAR(MayLinearVectorize) EIGEN_DEBUG_VAR(MaySliceVectorize) std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; EIGEN_DEBUG_VAR(UnrollingLimit) std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; std::cerr << std::endl; } #endif }; /*************************************************************************** * Part 2 : unrollers ***************************************************************************/ /*** no vectorization ***/ template struct redux_novec_unroller { enum { HalfLength = Length/2 }; typedef typename Evaluator::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func) { return func(redux_novec_unroller::run(eval,func), redux_novec_unroller::run(eval,func)); } }; template struct redux_novec_unroller { enum { outer = Start / Evaluator::InnerSizeAtCompileTime, inner = Start % Evaluator::InnerSizeAtCompileTime }; typedef typename Evaluator::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func&) { return eval.coeffByOuterInner(outer, inner); } }; // This is actually dead code and will never be called. It is required // to prevent false warnings regarding failed inlining though // for 0 length run() will never be called at all. 
template struct redux_novec_unroller { typedef typename Evaluator::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); } }; /*** vectorization ***/ template struct redux_vec_unroller { template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func& func) { enum { PacketSize = unpacket_traits::size, HalfLength = Length/2 }; return func.packetOp( redux_vec_unroller::template run(eval,func), redux_vec_unroller::template run(eval,func) ); } }; template struct redux_vec_unroller { template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator &eval, const Func&) { enum { PacketSize = unpacket_traits::size, index = Start * PacketSize, outer = index / int(Evaluator::InnerSizeAtCompileTime), inner = index % int(Evaluator::InnerSizeAtCompileTime), alignment = Evaluator::Alignment }; return eval.template packetByOuterInner(outer, inner); } }; /*************************************************************************** * Part 3 : implementation of all cases ***************************************************************************/ template::Traversal, int Unrolling = redux_traits::Unrolling > struct redux_impl; template struct redux_impl { typedef typename Evaluator::Scalar Scalar; template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) { eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); Scalar res; res = eval.coeffByOuterInner(0, 0); for(Index i = 1; i < xpr.innerSize(); ++i) res = func(res, eval.coeffByOuterInner(0, i)); for(Index i = 1; i < xpr.outerSize(); ++i) for(Index j = 0; j < xpr.innerSize(); ++j) res = func(res, eval.coeffByOuterInner(i, j)); return res; } }; template struct redux_impl : redux_novec_unroller { typedef redux_novec_unroller Base; typedef typename Evaluator::Scalar Scalar; template EIGEN_DEVICE_FUNC static 
EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func, const XprType& /*xpr*/) { return Base::run(eval,func); } }; template struct redux_impl { typedef typename Evaluator::Scalar Scalar; typedef typename redux_traits::PacketType PacketScalar; template static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) { const Index size = xpr.size(); const Index packetSize = redux_traits::PacketSize; const int packetAlignment = unpacket_traits::alignment; enum { alignment0 = (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned), alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Evaluator::Alignment) }; const Index alignedStart = internal::first_default_aligned(xpr); const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize); const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize); const Index alignedEnd2 = alignedStart + alignedSize2; const Index alignedEnd = alignedStart + alignedSize; Scalar res; if(alignedSize) { PacketScalar packet_res0 = eval.template packet(alignedStart); if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop { PacketScalar packet_res1 = eval.template packet(alignedStart+packetSize); for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize) { packet_res0 = func.packetOp(packet_res0, eval.template packet(index)); packet_res1 = func.packetOp(packet_res1, eval.template packet(index+packetSize)); } packet_res0 = func.packetOp(packet_res0,packet_res1); if(alignedEnd>alignedEnd2) packet_res0 = func.packetOp(packet_res0, eval.template packet(alignedEnd2)); } res = func.predux(packet_res0); for(Index index = 0; index < alignedStart; ++index) res = func(res,eval.coeff(index)); for(Index index = alignedEnd; index < size; ++index) res = func(res,eval.coeff(index)); } else // too small to vectorize anything. 
// since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. { res = eval.coeff(0); for(Index index = 1; index < size; ++index) res = func(res,eval.coeff(index)); } return res; } }; // NOTE: for SliceVectorizedTraversal we simply bypass unrolling template struct redux_impl { typedef typename Evaluator::Scalar Scalar; typedef typename redux_traits::PacketType PacketType; template EIGEN_DEVICE_FUNC static Scalar run(const Evaluator &eval, const Func& func, const XprType& xpr) { eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); const Index innerSize = xpr.innerSize(); const Index outerSize = xpr.outerSize(); enum { packetSize = redux_traits::PacketSize }; const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; Scalar res; if(packetedInnerSize) { PacketType packet_res = eval.template packet(0,0); for(Index j=0; j(j,i)); res = func.predux(packet_res); for(Index j=0; j::run(eval, func, xpr); } return res; } }; template struct redux_impl { typedef typename Evaluator::Scalar Scalar; typedef typename redux_traits::PacketType PacketType; enum { PacketSize = redux_traits::PacketSize, Size = Evaluator::SizeAtCompileTime, VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize) }; template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator &eval, const Func& func, const XprType &xpr) { EIGEN_ONLY_USED_FOR_DEBUG(xpr) eigen_assert(xpr.rows()>0 && xpr.cols()>0 && "you are using an empty matrix"); if (VectorizedSize > 0) { Scalar res = func.predux(redux_vec_unroller::template run(eval,func)); if (VectorizedSize != Size) res = func(res,redux_novec_unroller::run(eval,func)); return res; } else { return redux_novec_unroller::run(eval,func); } } }; // evaluator adaptor template class redux_evaluator : public internal::evaluator { typedef internal::evaluator Base; public: typedef XprType_ XprType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType &xpr) 
: Base(xpr) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; enum { MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime, MaxColsAtCompileTime = XprType::MaxColsAtCompileTime, // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator Flags = Base::Flags & ~DirectAccessBit, IsRowMajor = XprType::IsRowMajor, SizeAtCompileTime = XprType::SizeAtCompileTime, InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const { return Base::template packet(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); } }; } // end namespace internal /*************************************************************************** * Part 4 : public API ***************************************************************************/ /** \returns the result of a full redux operation on the whole matrix or vector using \a func * * The template parameter \a BinaryOp is the type of the functor \a func which must be * an associative operator. Both current C++98 and C++11 functor styles are handled. * * \warning the matrix must be not empty, otherwise an assertion is triggered. 
* * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::redux(const Func& func) const { eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix"); typedef typename internal::redux_evaluator ThisEvaluator; ThisEvaluator thisEval(derived()); // The initial expression is passed to the reducer as an additional argument instead of // passing it as a member of redux_evaluator to help return internal::redux_impl::run(thisEval, func, derived()); } /** \returns the minimum of all coefficients of \c *this. * In case \c *this contains NaN, NaNPropagation determines the behavior: * NaNPropagation == PropagateFast : undefined * NaNPropagation == PropagateNaN : result is NaN * NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN * \warning the matrix must be not empty, otherwise an assertion is triggered. */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::minCoeff() const { return derived().redux(Eigen::internal::scalar_min_op()); } /** \returns the maximum of all coefficients of \c *this. * In case \c *this contains NaN, NaNPropagation determines the behavior: * NaNPropagation == PropagateFast : undefined * NaNPropagation == PropagateNaN : result is NaN * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN * \warning the matrix must be not empty, otherwise an assertion is triggered. */ template template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::maxCoeff() const { return derived().redux(Eigen::internal::scalar_max_op()); } /** \returns the sum of all coefficients of \c *this * * If \c *this is empty, then the value 0 is returned. 
* * \sa trace(), prod(), mean() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::sum() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(0); return derived().redux(Eigen::internal::scalar_sum_op()); } /** \returns the mean of all coefficients of *this * * \sa trace(), prod(), sum() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::mean() const { #ifdef __INTEL_COMPILER #pragma warning push #pragma warning ( disable : 2259 ) #endif return Scalar(derived().redux(Eigen::internal::scalar_sum_op())) / Scalar(this->size()); #ifdef __INTEL_COMPILER #pragma warning pop #endif } /** \returns the product of all coefficients of *this * * Example: \include MatrixBase_prod.cpp * Output: \verbinclude MatrixBase_prod.out * * \sa sum(), mean(), trace() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::prod() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(1); return derived().redux(Eigen::internal::scalar_product_op()); } /** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. * * \c *this can be any matrix, not necessarily square. * * \sa diagonal(), sum() */ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits::Scalar MatrixBase::trace() const { return derived().diagonal().sum(); } } // end namespace Eigen #endif // EIGEN_REDUX_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Ref.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REF_H #define EIGEN_REF_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : public traits > { typedef _PlainObjectType PlainObjectType; typedef _StrideType StrideType; enum { Options = Options_, Flags = traits >::Flags | NestByRefBit, Alignment = traits >::Alignment }; template struct match { enum { IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime, HasDirectAccess = internal::has_direct_access::ret, StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), OuterStrideMatch = IsVectorAtCompileTime || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime), // NOTE, this indirection of evaluator::Alignment is needed // to workaround a very strange bug in MSVC related to the instantiation // of has_*ary_operator in evaluator. // This line is surprisingly very sensitive. For instance, simply adding parenthesis // as "DerivedAlignment = (int(evaluator::Alignment))," will make MSVC fail... 
DerivedAlignment = int(evaluator::Alignment), AlignmentMatch = (int(traits::Alignment)==int(Unaligned)) || (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should be replaced by the required alignment ScalarTypeMatch = internal::is_same::value, MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch }; typedef typename internal::conditional::type type; }; }; template struct traits > : public traits {}; } template class RefBase : public MapBase { public: typedef typename internal::traits::PlainObjectType PlainObjectType; typedef typename internal::traits::StrideType StrideType; typedef MapBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(RefBase) EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() : IsVectorAtCompileTime ? this->size() : int(Flags)&RowMajorBit ? this->cols() : this->rows(); } EIGEN_DEVICE_FUNC RefBase() : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime), // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values: m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime, StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase) protected: typedef Stride StrideBase; // Resolves inner stride if default 0. static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveInnerStride(Index inner) { return inner == 0 ? 1 : inner; } // Resolves outer stride if default 0. 
static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols, bool isVectorAtCompileTime, bool isRowMajor) { return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer; } // Returns true if construction is valid, false if there is a stride mismatch, // and fails if there is a size mismatch. template EIGEN_DEVICE_FUNC bool construct(Expression& expr) { // Check matrix sizes. If this is a compile-time vector, we do allow // implicitly transposing. EIGEN_STATIC_ASSERT( EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression) // If it is a vector, the transpose sizes might match. || ( PlainObjectType::IsVectorAtCompileTime && ((int(PlainObjectType::RowsAtCompileTime)==Eigen::Dynamic || int(Expression::ColsAtCompileTime)==Eigen::Dynamic || int(PlainObjectType::RowsAtCompileTime)==int(Expression::ColsAtCompileTime)) && (int(PlainObjectType::ColsAtCompileTime)==Eigen::Dynamic || int(Expression::RowsAtCompileTime)==Eigen::Dynamic || int(PlainObjectType::ColsAtCompileTime)==int(Expression::RowsAtCompileTime)))), YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES ) // Determine runtime rows and columns. Index rows = expr.rows(); Index cols = expr.cols(); if(PlainObjectType::RowsAtCompileTime==1) { eigen_assert(expr.rows()==1 || expr.cols()==1); rows = 1; cols = expr.size(); } else if(PlainObjectType::ColsAtCompileTime==1) { eigen_assert(expr.rows()==1 || expr.cols()==1); rows = expr.size(); cols = 1; } // Verify that the sizes are valid. eigen_assert( (PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows)); eigen_assert( (PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols)); // If this is a vector, we might be transposing, which means that stride should swap. 
const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows()); // If the storage format differs, we also need to swap the stride. const bool row_major = ((PlainObjectType::Flags)&RowMajorBit) != 0; const bool expr_row_major = (Expression::Flags&RowMajorBit) != 0; const bool storage_differs = (row_major != expr_row_major); const bool swap_stride = (transpose != storage_differs); // Determine expr's actual strides, resolving any defaults if zero. const Index expr_inner_actual = resolveInnerStride(expr.innerStride()); const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, expr.outerStride(), expr.rows(), expr.cols(), Expression::IsVectorAtCompileTime != 0, expr_row_major); // If this is a column-major row vector or row-major column vector, the inner-stride // is arbitrary, so set it to either the compile-time inner stride or 1. const bool row_vector = (rows == 1); const bool col_vector = (cols == 1); const Index inner_stride = ( (!row_major && row_vector) || (row_major && col_vector) ) ? ( StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1) : swap_stride ? expr_outer_actual : expr_inner_actual; // If this is a column-major column vector or row-major row vector, the outer-stride // is arbitrary, so set it to either the compile-time outer stride or vector size. const Index outer_stride = ( (!row_major && col_vector) || (row_major && row_vector) ) ? ( StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime) : rows * cols * inner_stride) : swap_stride ? expr_inner_actual : expr_outer_actual; // Check if given inner/outer strides are compatible with compile-time strides. 
const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) || (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride); if (!inner_valid) { return false; } const bool outer_valid = (StrideType::OuterStrideAtCompileTime == Dynamic) || (resolveOuterStride( inner_stride, Index(StrideType::OuterStrideAtCompileTime), rows, cols, PlainObjectType::IsVectorAtCompileTime != 0, row_major) == outer_stride); if (!outer_valid) { return false; } ::new (static_cast(this)) Base(expr.data(), rows, cols); ::new (&m_stride) StrideBase( (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride, (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride ); return true; } StrideBase m_stride; }; /** \class Ref * \ingroup Core_Module * * \brief A matrix or vector expression mapping an existing expression * * \tparam PlainObjectType the equivalent matrix type of the mapped data * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. * The default is \c #Unaligned. * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), * but accepts a variable outer stride (leading dimension). * This can be overridden by specifying strides. * The type passed here must be a specialization of the Stride template, see examples below. * * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies. * A Ref<> object can represent either a const expression or a l-value: * \code * // in-out argument: * void foo1(Ref x); * * // read-only const argument: * void foo2(const Ref& x); * \endcode * * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered. 
* By default, a Ref can reference any dense vector expression of float having a contiguous memory layout. * Likewise, a Ref can reference any column-major dense matrix expression of float whose column's elements are contiguously stored with * the possibility to have a constant space in-between each column, i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) * can be greater than the number of rows. * * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function. * Here are some examples: * \code * MatrixXf A; * VectorXf a; * foo1(a.head()); // OK * foo1(A.col()); // OK * foo1(A.row()); // Compilation error because here innerstride!=1 * foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object * foo2(A.row().transpose()); // The row is copied into a contiguous temporary * foo2(2*a); // The expression is evaluated into a temporary * foo2(A.col().segment(2,4)); // No temporary * \endcode * * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters. * Here is an example accepting an innerstride!=1: * \code * // in-out argument: * void foo3(Ref > x); * foo3(A.row()); // OK * \endcode * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overload internally calling a * template function, e.g.: * \code * // in the .h: * void foo(const Ref& A); * void foo(const Ref >& A); * * // in the .cpp: * template void foo_impl(const TypeOfA& A) { * ... 
// crazy code goes here * } * void foo(const Ref& A) { foo_impl(A); } * void foo(const Ref >& A) { foo_impl(A); } * \endcode * * See also the following stackoverflow questions for further references: * - Correct usage of the Eigen::Ref<> class * * \sa PlainObjectBase::Map(), \ref TopicStorageOrders */ template class Ref : public RefBase > { private: typedef internal::traits Traits; template EIGEN_DEVICE_FUNC inline Ref(const PlainObjectBase& expr, typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0); public: typedef RefBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Ref) #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_DEVICE_FUNC inline Ref(PlainObjectBase& expr, typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) { EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); // Construction must pass since we will not create temporary storage in the non-const case. const bool success = Base::construct(expr.derived()); EIGEN_UNUSED_VARIABLE(success) eigen_assert(success); } template EIGEN_DEVICE_FUNC inline Ref(const DenseBase& expr, typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) #else /** Implicit constructor from any dense expression */ template inline Ref(DenseBase& expr) #endif { EIGEN_STATIC_ASSERT(bool(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); // Construction must pass since we will not create temporary storage in the non-const case. 
const bool success = Base::construct(expr.const_cast_derived()); EIGEN_UNUSED_VARIABLE(success) eigen_assert(success); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref) }; // this is the const ref version template class Ref : public RefBase > { public: typedef internal::traits Traits; typedef RefBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Ref) template EIGEN_DEVICE_FUNC inline Ref(const DenseBase& expr, typename internal::enable_if::ScalarTypeMatch),Derived>::type* = 0) { // std::cout << match_helper::HasDirectAccess << "," << match_helper::OuterStrideMatch << "," << match_helper::InnerStrideMatch << "\n"; // std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; // std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; construct(expr.derived(), typename Traits::template match::type()); } EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) { // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy } template EIGEN_DEVICE_FUNC inline Ref(const RefBase& other) { construct(other.derived(), typename Traits::template match::type()); } protected: template EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type) { // Check if we can use the underlying expr's storage directly, otherwise call the copy version. 
if (!Base::construct(expr)) { construct(expr, internal::false_type()); } } template EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) { internal::call_assignment_no_alias(m_object,expr,internal::assign_op()); Base::construct(m_object); } protected: TPlainObjectType m_object; }; } // end namespace Eigen #endif // EIGEN_REF_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Replicate.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REPLICATE_H #define EIGEN_REPLICATE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef typename MatrixType::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; typedef typename ref_selector::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic ? Dynamic : RowFactor * MatrixType::RowsAtCompileTime, ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic ? Dynamic : ColFactor * MatrixType::ColsAtCompileTime, //FIXME we don't propagate the max sizes !!! MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1 : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 : (MatrixType::Flags & RowMajorBit) ? 1 : 0, // FIXME enable DirectAccess with negative strides? Flags = IsRowMajor ? 
RowMajorBit : 0 }; }; } /** * \class Replicate * \ingroup Core_Module * * \brief Expression of the multiple replication of a matrix or vector * * \tparam MatrixType the type of the object we are replicating * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic. * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic. * * This class represents an expression of the multiple replication of a matrix or vector. * It is the return type of DenseBase::replicate() and most of the time * this is the only way it is used. * * \sa DenseBase::replicate() */ template class Replicate : public internal::dense_xpr_base< Replicate >::type { typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) typedef typename internal::remove_all::type NestedExpression; template EIGEN_DEVICE_FUNC inline explicit Replicate(const OriginalMatrixType& matrix) : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); } template EIGEN_DEVICE_FUNC inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } EIGEN_DEVICE_FUNC const _MatrixTypeNested& 
nestedExpression() const { return m_matrix; } protected: MatrixTypeNested m_matrix; const internal::variable_if_dynamic m_rowFactor; const internal::variable_if_dynamic m_colFactor; }; /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate.cpp * Output: \verbinclude MatrixBase_replicate.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate */ template template EIGEN_DEVICE_FUNC const Replicate DenseBase::replicate() const { return Replicate(derived()); } /** * \return an expression of the replication of each column (or row) of \c *this * * Example: \include DirectionWise_replicate_int.cpp * Output: \verbinclude DirectionWise_replicate_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ template EIGEN_DEVICE_FUNC const typename VectorwiseOp::ReplicateReturnType VectorwiseOp::replicate(Index factor) const { return typename VectorwiseOp::ReplicateReturnType (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); } } // end namespace Eigen #endif // EIGEN_REPLICATE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Reshaped.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2017 Gael Guennebaud // Copyright (C) 2014 yoco // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_RESHAPED_H #define EIGEN_RESHAPED_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class Reshaped * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size reshape * * \tparam XprType the type of the expression in which we are taking a reshape * \tparam Rows the number of rows of the reshape we are taking at compile time (optional) * \tparam Cols the number of columns of the reshape we are taking at compile time (optional) * \tparam Order can be ColMajor or RowMajor, default is ColMajor. * * This class represents an expression of either a fixed-size or dynamic-size reshape. * It is the return type of DenseBase::reshaped(NRowsType,NColsType) and * most of the time this is the only way it is used. * * However, in C++98, if you want to directly maniputate reshaped expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. In C++11, it is advised to use the \em auto * keyword for such use cases. * * Here is an example illustrating the dynamic case: * \include class_Reshaped.cpp * Output: \verbinclude class_Reshaped.out * * Here is an example illustrating the fixed-size case: * \include class_FixedReshaped.cpp * Output: \verbinclude class_FixedReshaped.out * * \sa DenseBase::reshaped(NRowsType,NColsType) */ namespace internal { template struct traits > : traits { typedef typename traits::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; enum{ MatrixRows = traits::RowsAtCompileTime, MatrixCols = traits::ColsAtCompileTime, RowsAtCompileTime = Rows, ColsAtCompileTime = Cols, MaxRowsAtCompileTime = Rows, MaxColsAtCompileTime = Cols, XpxStorageOrder = ((int(traits::Flags) & RowMajorBit) == RowMajorBit) ? RowMajor : ColMajor, ReshapedStorageOrder = (RowsAtCompileTime == 1 && ColsAtCompileTime != 1) ? RowMajor : (ColsAtCompileTime == 1 && RowsAtCompileTime != 1) ? 
ColMajor : XpxStorageOrder, HasSameStorageOrderAsXprType = (ReshapedStorageOrder == XpxStorageOrder), InnerSize = (ReshapedStorageOrder==int(RowMajor)) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time::ret) : Dynamic, OuterStrideAtCompileTime = Dynamic, HasDirectAccess = internal::has_direct_access::ret && (Order==int(XpxStorageOrder)) && ((evaluator::Flags&LinearAccessBit)==LinearAccessBit), MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits::size) == 0) && (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0, //MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, FlagsRowMajorBit = (ReshapedStorageOrder==int(RowMajor)) ? RowMajorBit : 0, FlagsDirectAccessBit = HasDirectAccess ? 
DirectAccessBit : 0, Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | MaskPacketAccessBit), Flags = (Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit | FlagsDirectAccessBit) }; }; template class ReshapedImpl_dense; } // end namespace internal template class ReshapedImpl; template class Reshaped : public ReshapedImpl::StorageKind> { typedef ReshapedImpl::StorageKind> Impl; public: //typedef typename Impl::Base Base; typedef Impl Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Reshaped) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reshaped) /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr) : Impl(xpr) { EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) eigen_assert(Rows * Cols == xpr.rows() * xpr.cols()); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr, Index reshapeRows, Index reshapeCols) : Impl(xpr, reshapeRows, reshapeCols) { eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==reshapeRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==reshapeCols)); eigen_assert(reshapeRows * reshapeCols == xpr.rows() * xpr.cols()); } }; // The generic default implementation for dense reshape simply forward to the internal::ReshapedImpl_dense // that must be specialized for direct and non-direct access... template class ReshapedImpl : public internal::ReshapedImpl_dense >::HasDirectAccess> { typedef internal::ReshapedImpl_dense >::HasDirectAccess> Impl; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl) EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr) : Impl(xpr) {} EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr, Index reshapeRows, Index reshapeCols) : Impl(xpr, reshapeRows, reshapeCols) {} }; namespace internal { /** \internal Internal implementation of dense Reshaped in the general case. 
*/ template class ReshapedImpl_dense : public internal::dense_xpr_base >::type { typedef Reshaped ReshapedType; public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) typedef typename internal::ref_selector::non_const_type MatrixTypeNested; typedef typename internal::remove_all::type NestedExpression; class InnerIterator; /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : m_xpr(xpr), m_rows(Rows), m_cols(Cols) {} /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) : m_xpr(xpr), m_rows(nRows), m_cols(nCols) {} EIGEN_DEVICE_FUNC Index rows() const { return m_rows; } EIGEN_DEVICE_FUNC Index cols() const { return m_cols; } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \sa MapBase::data() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const; EIGEN_DEVICE_FUNC inline Index innerStride() const; EIGEN_DEVICE_FUNC inline Index outerStride() const; #endif /** \returns the nested expression */ EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC typename internal::remove_reference::type& nestedExpression() { return m_xpr; } protected: MatrixTypeNested m_xpr; const internal::variable_if_dynamic m_rows; const internal::variable_if_dynamic m_cols; }; /** \internal Internal implementation of dense Reshaped in the direct access case. 
*/ template class ReshapedImpl_dense : public MapBase > { typedef Reshaped ReshapedType; typedef typename internal::ref_selector::non_const_type XprTypeNested; public: typedef MapBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense) /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : Base(xpr.data()), m_xpr(xpr) {} /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols) : Base(xpr.data(), nRows, nCols), m_xpr(xpr) {} EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } /** \sa MapBase::innerStride() */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return m_xpr.innerStride(); } /** \sa MapBase::outerStride() */ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return ((Flags&RowMajorBit)==RowMajorBit) ? this->cols() : this->rows(); } protected: XprTypeNested m_xpr; }; // Evaluators template struct reshaped_evaluator; template struct evaluator > : reshaped_evaluator >::HasDirectAccess> { typedef Reshaped XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types typedef typename packet_traits::type PacketScalar; enum { CoeffReadCost = evaluator::CoeffReadCost, HasDirectAccess = traits::HasDirectAccess, // RowsAtCompileTime = traits::RowsAtCompileTime, // ColsAtCompileTime = traits::ColsAtCompileTime, // MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, // MaxColsAtCompileTime = traits::MaxColsAtCompileTime, // // InnerStrideAtCompileTime = traits::HasSameStorageOrderAsXprType // ? int(inner_stride_at_compile_time::ret) // : Dynamic, // OuterStrideAtCompileTime = Dynamic, FlagsLinearAccessBit = (traits::RowsAtCompileTime == 1 || traits::ColsAtCompileTime == 1 || HasDirectAccess) ? 
LinearAccessBit : 0, FlagsRowMajorBit = (traits::ReshapedStorageOrder==int(RowMajor)) ? RowMajorBit : 0, FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0, Flags0 = evaluator::Flags & (HereditaryBits & ~RowMajorBit), Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit | FlagsDirectAccessBit, PacketAlignment = unpacket_traits::alignment, Alignment = evaluator::Alignment }; typedef reshaped_evaluator reshaped_evaluator_type; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : reshaped_evaluator_type(xpr) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } }; template struct reshaped_evaluator : evaluator_base > { typedef Reshaped XprType; enum { CoeffReadCost = evaluator::CoeffReadCost /* TODO + cost of index computations */, Flags = (evaluator::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)), Alignment = 0 }; EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef std::pair RowCol; inline RowCol index_remap(Index rowId, Index colId) const { if(Order==ColMajor) { const Index nth_elem_idx = colId * m_xpr.rows() + rowId; return RowCol(nth_elem_idx % m_xpr.nestedExpression().rows(), nth_elem_idx / m_xpr.nestedExpression().rows()); } else { const Index nth_elem_idx = colId + rowId * m_xpr.cols(); return RowCol(nth_elem_idx / m_xpr.nestedExpression().cols(), nth_elem_idx % m_xpr.nestedExpression().cols()); } } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { EIGEN_STATIC_ASSERT_LVALUE(XprType) const RowCol row_col = index_remap(rowId, colId); return m_argImpl.coeffRef(row_col.first, row_col.second); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { const RowCol row_col = index_remap(rowId, colId); return m_argImpl.coeffRef(row_col.first, row_col.second); } 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { const RowCol row_col = index_remap(rowId, colId); return m_argImpl.coeff(row_col.first, row_col.second); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(XprType) const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0); return m_argImpl.coeffRef(row_col.first, row_col.second); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0); return m_argImpl.coeffRef(row_col.first, row_col.second); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0); return m_argImpl.coeff(row_col.first, row_col.second); } #if 0 EIGEN_DEVICE_FUNC template inline PacketScalar packet(Index rowId, Index colId) const { const RowCol row_col = index_remap(rowId, colId); return m_argImpl.template packet(row_col.first, row_col.second); } template EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { const RowCol row_col = index_remap(rowId, colId); m_argImpl.const_cast_derived().template writePacket (row_col.first, row_col.second, val); } template EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const { const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); return m_argImpl.template packet(row_col.first, row_col.second); } template EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val) { const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? 
index : 0); return m_argImpl.template packet(row_col.first, row_col.second, val); } #endif protected: evaluator m_argImpl; const XprType& m_xpr; }; template struct reshaped_evaluator : mapbase_evaluator, typename Reshaped::PlainObject> { typedef Reshaped XprType; typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : mapbase_evaluator(xpr) { // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime eigen_assert(((internal::UIntPtr(xpr.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator::Alignment)) == 0) && "data is not aligned"); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_RESHAPED_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/ReturnByValue.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // Copyright (C) 2009-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_RETURNBYVALUE_H #define EIGEN_RETURNBYVALUE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : public traits::ReturnType> { enum { // We're disabling the DirectAccess because e.g. the constructor of // the Block-with-DirectAccess expression requires to have a coeffRef method. // Also, we don't want to have to implement the stride stuff. Flags = (traits::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit }; }; /* The ReturnByValue object doesn't even have a coeff() method. * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. 
* So internal::nested always gives the plain return matrix type. * * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators */ template struct nested_eval, n, PlainObject> { typedef typename traits::ReturnType type; }; } // end namespace internal /** \class ReturnByValue * \ingroup Core_Module * */ template class ReturnByValue : public internal::dense_xpr_base< ReturnByValue >::type, internal::no_assignment_operator { public: typedef typename internal::traits::ReturnType ReturnType; typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) template EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return static_cast(this)->rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT class Unusable{ Unusable(const Unusable&) {} Unusable& operator=(const Unusable&) {return *this;} }; const Unusable& coeff(Index) const { return *reinterpret_cast(this); } const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } Unusable& coeffRef(Index) { return *reinterpret_cast(this); } Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } #undef Unusable #endif }; template template EIGEN_DEVICE_FUNC Derived& DenseBase::operator=(const ReturnByValue& other) { other.evalTo(derived()); return derived(); } namespace internal { // Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that // when a ReturnByValue expression is assigned, the evaluator is not constructed. 
// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world template struct evaluator > : public evaluator::ReturnType> { typedef ReturnByValue XprType; typedef typename internal::traits::ReturnType PlainObject; typedef evaluator Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) { ::new (static_cast(this)) Base(m_result); xpr.evalTo(m_result); } protected: PlainObject m_result; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_RETURNBYVALUE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Reverse.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2009 Ricard Marxer // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_REVERSE_H #define EIGEN_REVERSE_H #include "./InternalHeaderCheck.h" namespace Eigen { namespace internal { template struct traits > : traits { typedef typename MatrixType::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; typedef typename ref_selector::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit) }; }; template struct reverse_packet_cond { static inline PacketType run(const PacketType& x) { return preverse(x); } }; template struct reverse_packet_cond { static inline PacketType run(const PacketType& x) { return x; } }; } // end namespace internal /** \class Reverse * \ingroup Core_Module * * \brief Expression of the reverse of a vector or matrix * * \tparam MatrixType the type of the object of which we are taking the reverse * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections * * This class represents an expression of the reverse of a vector. * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse() * and most of the time this is the only way it is used. 
* * \sa MatrixBase::reverse(), VectorwiseOp::reverse() */ template class Reverse : public internal::dense_xpr_base< Reverse >::type { public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) typedef typename internal::remove_all::type NestedExpression; using Base::IsRowMajor; protected: enum { PacketSize = internal::packet_traits::size, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1, ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor) }; typedef internal::reverse_packet_cond reverse_packet; public: EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); } EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return -m_matrix.innerStride(); } EIGEN_DEVICE_FUNC const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } protected: typename MatrixType::Nested m_matrix; }; /** \returns an expression of the reverse of *this. * * Example: \include MatrixBase_reverse.cpp * Output: \verbinclude MatrixBase_reverse.out * */ template EIGEN_DEVICE_FUNC inline typename DenseBase::ReverseReturnType DenseBase::reverse() { return ReverseReturnType(derived()); } //reverse const overload moved DenseBase.h due to a CUDA compiler bug /** This is the "in place" version of reverse: it reverses \c *this. * * In most cases it is probably better to simply use the reversed expression * of a matrix. 
However, when reversing the matrix data itself is really needed, * then this "in-place" version is probably the right choice because it provides * the following additional benefits: * - less error prone: doing the same operation with .reverse() requires special care: * \code m = m.reverse().eval(); \endcode * - this API enables reverse operations without the need for a temporary * - it allows future optimizations (cache friendliness, etc.) * * \sa VectorwiseOp::reverseInPlace(), reverse() */ template EIGEN_DEVICE_FUNC inline void DenseBase::reverseInPlace() { if(cols()>rows()) { Index half = cols()/2; leftCols(half).swap(rightCols(half).reverse()); if((cols()%2)==1) { Index half2 = rows()/2; col(half).head(half2).swap(col(half).tail(half2).reverse()); } } else { Index half = rows()/2; topRows(half).swap(bottomRows(half).reverse()); if((rows()%2)==1) { Index half2 = cols()/2; row(half).head(half2).swap(row(half).tail(half2).reverse()); } } } namespace internal { template struct vectorwise_reverse_inplace_impl; template<> struct vectorwise_reverse_inplace_impl { template static void run(ExpressionType &xpr) { const int HalfAtCompileTime = ExpressionType::RowsAtCompileTime==Dynamic?Dynamic:ExpressionType::RowsAtCompileTime/2; Index half = xpr.rows()/2; xpr.topRows(fix(half)) .swap(xpr.bottomRows(fix(half)).colwise().reverse()); } }; template<> struct vectorwise_reverse_inplace_impl { template static void run(ExpressionType &xpr) { const int HalfAtCompileTime = ExpressionType::ColsAtCompileTime==Dynamic?Dynamic:ExpressionType::ColsAtCompileTime/2; Index half = xpr.cols()/2; xpr.leftCols(fix(half)) .swap(xpr.rightCols(fix(half)).rowwise().reverse()); } }; } // end namespace internal /** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this. * * In most cases it is probably better to simply use the reversed expression * of a matrix. 
However, when reversing the matrix data itself is really needed, * then this "in-place" version is probably the right choice because it provides * the following additional benefits: * - less error prone: doing the same operation with .reverse() requires special care: * \code m = m.reverse().eval(); \endcode * - this API enables reverse operations without the need for a temporary * * \sa DenseBase::reverseInPlace(), reverse() */ template EIGEN_DEVICE_FUNC void VectorwiseOp::reverseInPlace() { internal::vectorwise_reverse_inplace_impl::run(m_matrix); } } // end namespace Eigen #endif // EIGEN_REVERSE_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/Eigen/src/Core/Select.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELECT_H #define EIGEN_SELECT_H #include "./InternalHeaderCheck.h" namespace Eigen { /** \class Select * \ingroup Core_Module * * \brief Expression of a coefficient wise version of the C++ ternary operator ?: * * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix * \param ThenMatrixType the type of the \em then expression * \param ElseMatrixType the type of the \em else expression * * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. * It is the return type of DenseBase::select() and most of the time this is the only way it is used. 
* * \sa DenseBase::select(const DenseBase&, const DenseBase&) const */ namespace internal { template struct traits > : traits { typedef typename traits::Scalar Scalar; typedef Dense StorageKind; typedef typename traits::XprKind XprKind; typedef typename ConditionMatrixType::Nested ConditionMatrixNested; typedef typename ThenMatrixType::Nested ThenMatrixNested; typedef typename ElseMatrixType::Nested ElseMatrixNested; enum { RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit }; }; } template class Select : public internal::dense_xpr_base< Select >::type, internal::no_assignment_operator { public: typedef typename internal::dense_xpr_base" << endl; cerr << "available actions:" << endl; for (auto it = available_actions.begin(); it != available_actions.end(); ++it) { cerr << " " << (*it)->invokation_name() << endl; } cerr << "the input files should each contain an output of benchmark-blocking-sizes" << endl; exit(1); } int main(int argc, char* argv[]) { cout.precision(default_precision); cerr.precision(default_precision); vector> available_actions; available_actions.emplace_back(new partition_action_t); available_actions.emplace_back(new evaluate_defaults_action_t); vector input_filenames; action_t* action = nullptr; if (argc < 2) { show_usage_and_exit(argc, argv, available_actions); } for (int i = 1; i < argc; i++) { bool arg_handled = false; // Step 1. Try to match action invocation names. for (auto it = available_actions.begin(); it != available_actions.end(); ++it) { if (!strcmp(argv[i], (*it)->invokation_name())) { if (!action) { action = it->get(); arg_handled = true; break; } else { cerr << "can't specify more than one action!" 
<< endl; show_usage_and_exit(argc, argv, available_actions); } } } if (arg_handled) { continue; } // Step 2. Try to match option names. if (argv[i][0] == '-') { if (!strcmp(argv[i], "--only-cubic-sizes")) { only_cubic_sizes = true; arg_handled = true; } if (!strcmp(argv[i], "--dump-tables")) { dump_tables = true; arg_handled = true; } if (!arg_handled) { cerr << "Unrecognized option: " << argv[i] << endl; show_usage_and_exit(argc, argv, available_actions); } } if (arg_handled) { continue; } // Step 3. Default to interpreting args as input filenames. input_filenames.emplace_back(argv[i]); } if (dump_tables && only_cubic_sizes) { cerr << "Incompatible options: --only-cubic-sizes and --dump-tables." << endl; show_usage_and_exit(argc, argv, available_actions); } if (!action) { show_usage_and_exit(argc, argv, available_actions); } action->run(input_filenames); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/basicbench.cxxlist ================================================ #!/bin/bash # CLIST[((g++))]="g++-3.4 -O3 -DNDEBUG" # CLIST[((g++))]="g++-3.4 -O3 -DNDEBUG -finline-limit=20000" # CLIST[((g++))]="g++-4.1 -O3 -DNDEBUG" #CLIST[((g++))]="g++-4.1 -O3 -DNDEBUG -finline-limit=20000" # CLIST[((g++))]="g++-4.2 -O3 -DNDEBUG" #CLIST[((g++))]="g++-4.2 -O3 -DNDEBUG -finline-limit=20000" # CLIST[((g++))]="g++-4.2 -O3 -DNDEBUG -finline-limit=20000 -fprofile-generate" # CLIST[((g++))]="g++-4.2 -O3 -DNDEBUG -finline-limit=20000 -fprofile-use" # CLIST[((g++))]="g++-4.3 -O3 -DNDEBUG" #CLIST[((g++))]="g++-4.3 -O3 -DNDEBUG -finline-limit=20000" # CLIST[((g++))]="g++-4.3 -O3 -DNDEBUG -finline-limit=20000 -fprofile-generate" # CLIST[((g++))]="g++-4.3 -O3 -DNDEBUG -finline-limit=20000 -fprofile-use" # CLIST[((g++))]="icpc -fast -DNDEBUG -fno-exceptions -no-inline-max-size -prof-genx" # CLIST[((g++))]="icpc -fast -DNDEBUG -fno-exceptions -no-inline-max-size -prof-use" 
#CLIST[((g++))]="/opt/intel/Compiler/11.1/072/bin/intel64/icpc -fast -DNDEBUG -fno-exceptions -no-inline-max-size -lrt" CLIST[((g++))]="/home/orzel/svn/llvm/Release/bin/clang++ -O3 -DNDEBUG -DEIGEN_DONT_VECTORIZE -lrt" CLIST[((g++))]="/home/orzel/svn/llvm/Release/bin/clang++ -O3 -DNDEBUG -lrt" CLIST[((g++))]="g++-4.4.4 -O3 -DNDEBUG -DEIGEN_DONT_VECTORIZE -lrt" CLIST[((g++))]="g++-4.4.4 -O3 -DNDEBUG -lrt" CLIST[((g++))]="g++-4.5.0 -O3 -DNDEBUG -DEIGEN_DONT_VECTORIZE -lrt" CLIST[((g++))]="g++-4.5.0 -O3 -DNDEBUG -lrt" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/basicbenchmark.cpp ================================================ #include #include "BenchUtil.h" #include "basicbenchmark.h" int main(int argc, char *argv[]) { DISABLE_SSE_EXCEPTIONS(); // this is the list of matrix type and size we want to bench: // ((suffix) (matrix size) (number of iterations)) #define MODES ((3d)(3)(4000000)) ((4d)(4)(1000000)) ((Xd)(4)(1000000)) ((Xd)(20)(10000)) // #define MODES ((Xd)(20)(10000)) #define _GENERATE_HEADER(R,ARG,EL) << BOOST_PP_STRINGIZE(BOOST_PP_SEQ_HEAD(EL)) << "-" \ << BOOST_PP_STRINGIZE(BOOST_PP_SEQ_ELEM(1,EL)) << "x" \ << BOOST_PP_STRINGIZE(BOOST_PP_SEQ_ELEM(1,EL)) << " / " std::cout BOOST_PP_SEQ_FOR_EACH(_GENERATE_HEADER, ~, MODES ) << endl; const int tries = 10; #define _RUN_BENCH(R,ARG,EL) \ std::cout << ARG( \ BOOST_PP_CAT(Matrix, BOOST_PP_SEQ_HEAD(EL)) (\ BOOST_PP_SEQ_ELEM(1,EL),BOOST_PP_SEQ_ELEM(1,EL)), BOOST_PP_SEQ_ELEM(2,EL), tries) \ << " "; BOOST_PP_SEQ_FOR_EACH(_RUN_BENCH, benchBasic, MODES ); std::cout << endl; BOOST_PP_SEQ_FOR_EACH(_RUN_BENCH, benchBasic, MODES ); std::cout << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/basicbenchmark.h ================================================ #ifndef EIGEN_BENCH_BASICBENCH_H #define EIGEN_BENCH_BASICBENCH_H enum {LazyEval, EarlyEval, OmpEval}; template void 
benchBasic_loop(const MatrixType& I, MatrixType& m, int iterations) __attribute__((noinline)); template void benchBasic_loop(const MatrixType& I, MatrixType& m, int iterations) { for(int a = 0; a < iterations; a++) { if (Mode==LazyEval) { asm("#begin_bench_loop LazyEval"); if (MatrixType::SizeAtCompileTime!=Eigen::Dynamic) asm("#fixedsize"); m = (I + 0.00005 * (m + m.lazyProduct(m))).eval(); } else if (Mode==OmpEval) { asm("#begin_bench_loop OmpEval"); if (MatrixType::SizeAtCompileTime!=Eigen::Dynamic) asm("#fixedsize"); m = (I + 0.00005 * (m + m.lazyProduct(m))).eval(); } else { asm("#begin_bench_loop EarlyEval"); if (MatrixType::SizeAtCompileTime!=Eigen::Dynamic) asm("#fixedsize"); m = I + 0.00005 * (m + m * m); } asm("#end_bench_loop"); } } template double benchBasic(const MatrixType& mat, int size, int tries) __attribute__((noinline)); template double benchBasic(const MatrixType& mat, int iterations, int tries) { const int rows = mat.rows(); const int cols = mat.cols(); MatrixType I(rows,cols); MatrixType m(rows,cols); initMatrix_identity(I); Eigen::BenchTimer timer; for(uint t=0; t(I, m, iterations); timer.stop(); cerr << m; } return timer.value(); }; #endif // EIGEN_BENCH_BASICBENCH_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchBlasGemm.cpp ================================================ // g++ -O3 -DNDEBUG -I.. 
-L /usr/lib64/atlas/ benchBlasGemm.cpp -o benchBlasGemm -lrt -lcblas // possible options: // -DEIGEN_DONT_VECTORIZE // -msse2 // #define EIGEN_DEFAULT_TO_ROW_MAJOR #define _FLOAT #include #include #include "BenchTimer.h" // include the BLAS headers extern "C" { #include } #include #ifdef _FLOAT typedef float Scalar; #define CBLAS_GEMM cblas_sgemm #else typedef double Scalar; #define CBLAS_GEMM cblas_dgemm #endif typedef Eigen::Matrix MyMatrix; void bench_eigengemm(MyMatrix& mc, const MyMatrix& ma, const MyMatrix& mb, int nbloops); void check_product(int M, int N, int K); void check_product(void); int main(int argc, char *argv[]) { // disable SSE exceptions #ifdef __GNUC__ { int aux; asm( "stmxcsr %[aux] \n\t" "orl $32832, %[aux] \n\t" "ldmxcsr %[aux] \n\t" : : [aux] "m" (aux)); } #endif int nbtries=1, nbloops=1, M, N, K; if (argc==2) { if (std::string(argv[1])=="check") check_product(); else M = N = K = atoi(argv[1]); } else if ((argc==3) && (std::string(argv[1])=="auto")) { M = N = K = atoi(argv[2]); nbloops = 1000000000/(M*M*M); if (nbloops<1) nbloops = 1; nbtries = 6; } else if (argc==4) { M = N = K = atoi(argv[1]); nbloops = atoi(argv[2]); nbtries = atoi(argv[3]); } else if (argc==6) { M = atoi(argv[1]); N = atoi(argv[2]); K = atoi(argv[3]); nbloops = atoi(argv[4]); nbtries = atoi(argv[5]); } else { std::cout << "Usage: " << argv[0] << " size \n"; std::cout << "Usage: " << argv[0] << " auto size\n"; std::cout << "Usage: " << argv[0] << " size nbloops nbtries\n"; std::cout << "Usage: " << argv[0] << " M N K nbloops nbtries\n"; std::cout << "Usage: " << argv[0] << " check\n"; std::cout << "Options:\n"; std::cout << " size unique size of the 2 matrices (integer)\n"; std::cout << " auto automatically set the number of repetitions and tries\n"; std::cout << " nbloops number of times the GEMM routines is executed\n"; std::cout << " nbtries number of times the loop is benched (return the best try)\n"; std::cout << " M N K sizes of the matrices: MxN = MxK * KxN 
(integers)\n"; std::cout << " check check eigen product using cblas as a reference\n"; exit(1); } double nbmad = double(M) * double(N) * double(K) * double(nbloops); if (!(std::string(argv[1])=="auto")) std::cout << M << " x " << N << " x " << K << "\n"; Scalar alpha, beta; MyMatrix ma(M,K), mb(K,N), mc(M,N); ma = MyMatrix::Random(M,K); mb = MyMatrix::Random(K,N); mc = MyMatrix::Random(M,N); Eigen::BenchTimer timer; // we simply compute c += a*b, so: alpha = 1; beta = 1; // bench cblas // ROWS_A, COLS_B, COLS_A, 1.0, A, COLS_A, B, COLS_B, 0.0, C, COLS_B); if (!(std::string(argv[1])=="auto")) { timer.reset(); for (uint k=0 ; k(1,64); N = internal::random(1,768); K = internal::random(1,768); M = (0 + M) * 1; std::cout << M << " x " << N << " x " << K << "\n"; check_product(M, N, K); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchCholesky.cpp ================================================ // g++ -DNDEBUG -O3 -I.. benchCholesky.cpp -o benchCholesky && ./benchCholesky // options: // -DBENCH_GSL -lgsl /usr/lib/libcblas.so.3 // -DEIGEN_DONT_VECTORIZE // -msse2 // -DREPEAT=100 // -DTRIES=10 // -DSCALAR=double #include #include #include #include using namespace Eigen; #ifndef REPEAT #define REPEAT 10000 #endif #ifndef TRIES #define TRIES 10 #endif typedef float Scalar; template __attribute__ ((noinline)) void benchLLT(const MatrixType& m) { int rows = m.rows(); int cols = m.cols(); double cost = 0; for (int j=0; j SquareMatrixType; MatrixType a = MatrixType::Random(rows,cols); SquareMatrixType covMat = a * a.adjoint(); BenchTimer timerNoSqrt, timerSqrt; Scalar acc = 0; int r = internal::random(0,covMat.rows()-1); int c = internal::random(0,covMat.cols()-1); for (int t=0; t cholnosqrt(covMat); acc += cholnosqrt.matrixL().coeff(r,c); } timerNoSqrt.stop(); } for (int t=0; t chol(covMat); acc += chol.matrixL().coeff(r,c); } timerSqrt.stop(); } if (MatrixType::RowsAtCompileTime==Dynamic) std::cout << "dyn 
"; else std::cout << "fixed "; std::cout << covMat.rows() << " \t" << (timerNoSqrt.best()) / repeats << "s " << "(" << 1e-9 * cost*repeats/timerNoSqrt.best() << " GFLOPS)\t" << (timerSqrt.best()) / repeats << "s " << "(" << 1e-9 * cost*repeats/timerSqrt.best() << " GFLOPS)\n"; #ifdef BENCH_GSL if (MatrixType::RowsAtCompileTime==Dynamic) { timerSqrt.reset(); gsl_matrix* gslCovMat = gsl_matrix_alloc(covMat.rows(),covMat.cols()); gsl_matrix* gslCopy = gsl_matrix_alloc(covMat.rows(),covMat.cols()); eiToGsl(covMat, &gslCovMat); for (int t=0; t0; ++i) benchLLT(Matrix(dynsizes[i],dynsizes[i])); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); benchLLT(Matrix()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchEigenSolver.cpp ================================================ // g++ -DNDEBUG -O3 -I.. benchEigenSolver.cpp -o benchEigenSolver && ./benchEigenSolver // options: // -DBENCH_GMM // -DBENCH_GSL -lgsl /usr/lib/libcblas.so.3 // -DEIGEN_DONT_VECTORIZE // -msse2 // -DREPEAT=100 // -DTRIES=10 // -DSCALAR=double #include #include #include #include using namespace Eigen; #ifndef REPEAT #define REPEAT 1000 #endif #ifndef TRIES #define TRIES 4 #endif #ifndef SCALAR #define SCALAR float #endif typedef SCALAR Scalar; template __attribute__ ((noinline)) void benchEigenSolver(const MatrixType& m) { int rows = m.rows(); int cols = m.cols(); int stdRepeats = std::max(1,int((REPEAT*1000)/(rows*rows*sqrt(rows)))); int saRepeats = stdRepeats * 4; typedef typename MatrixType::Scalar Scalar; typedef Matrix SquareMatrixType; MatrixType a = MatrixType::Random(rows,cols); SquareMatrixType covMat = a * a.adjoint(); BenchTimer timerSa, timerStd; Scalar acc = 0; int r = internal::random(0,covMat.rows()-1); int c = internal::random(0,covMat.cols()-1); { SelfAdjointEigenSolver ei(covMat); for (int t=0; t 
ei(covMat); for (int t=0; t gmmCovMat(covMat.rows(),covMat.cols()); gmm::dense_matrix eigvect(covMat.rows(),covMat.cols()); std::vector eigval(covMat.rows()); eiToGmm(covMat, gmmCovMat); for (int t=0; t0; ++i) benchEigenSolver(Matrix(dynsizes[i],dynsizes[i])); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); benchEigenSolver(Matrix()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchFFT.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include #include #include #include #include using namespace Eigen; using namespace std; template string nameof(); template <> string nameof() {return "float";} template <> string nameof() {return "double";} template <> string nameof() {return "long double";} #ifndef TYPE #define TYPE float #endif #ifndef NFFT #define NFFT 1024 #endif #ifndef NDATA #define NDATA 1000000 #endif using namespace Eigen; template void bench(int nfft,bool fwd,bool unscaled=false, bool halfspec=false) { typedef typename NumTraits::Real Scalar; typedef typename std::complex Complex; int nits = NDATA/nfft; vector inbuf(nfft); vector outbuf(nfft); FFT< Scalar > fft; if (unscaled) { fft.SetFlag(fft.Unscaled); cout << "unscaled "; } if (halfspec) { fft.SetFlag(fft.HalfSpectrum); cout << "halfspec "; } std::fill(inbuf.begin(),inbuf.end(),0); fft.fwd( outbuf , inbuf); BenchTimer timer; timer.reset(); for (int k=0;k<8;++k) { timer.start(); if (fwd) for(int i = 0; i < nits; i++) fft.fwd( outbuf , inbuf); else 
for(int i = 0; i < nits; i++) fft.inv(inbuf,outbuf); timer.stop(); } cout << nameof() << " "; double mflops = 5.*nfft*log2((double)nfft) / (1e6 * timer.value() / (double)nits ); if ( NumTraits::IsComplex ) { cout << "complex"; }else{ cout << "real "; mflops /= 2; } if (fwd) cout << " fwd"; else cout << " inv"; cout << " NFFT=" << nfft << " " << (double(1e-6*nfft*nits)/timer.value()) << " MS/s " << mflops << "MFLOPS\n"; } int main(int argc,char ** argv) { bench >(NFFT,true); bench >(NFFT,false); bench(NFFT,true); bench(NFFT,false); bench(NFFT,false,true); bench(NFFT,false,true,true); bench >(NFFT,true); bench >(NFFT,false); bench(NFFT,true); bench(NFFT,false); bench >(NFFT,true); bench >(NFFT,false); bench(NFFT,true); bench(NFFT,false); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchGeometry.cpp ================================================ #include #include #include #include #include using namespace Eigen; using namespace std; #ifndef REPEAT #define REPEAT 1000000 #endif enum func_opt { TV, TMATV, TMATVMAT, }; template struct func; template struct func { static EIGEN_DONT_INLINE res run( arg1& a1, arg2& a2 ) { asm (""); return a1 * a2; } }; template struct func { static EIGEN_DONT_INLINE res run( arg1& a1, arg2& a2 ) { asm (""); return a1.matrix() * a2; } }; template struct func { static EIGEN_DONT_INLINE res run( arg1& a1, arg2& a2 ) { asm (""); return res(a1.matrix() * a2.matrix()); } }; template struct test_transform { static void run() { arg1 a1; a1.setIdentity(); arg2 a2; a2.setIdentity(); BenchTimer timer; timer.reset(); for (int k=0; k<10; ++k) { timer.start(); for (int k=0; k Trans;\ typedef Matrix Vec;\ typedef func Func;\ test_transform< Func, Trans, Vec >::run();\ } #define run_trans( op, scalar, mode, option ) \ std::cout << #scalar << "\t " << #mode << "\t " << #option << " "; \ {\ typedef Transform Trans;\ typedef func Func;\ test_transform< Func, Trans, Trans 
>::run();\ } int main(int argc, char* argv[]) { cout << "vec = trans * vec" << endl; run_vec(TV, float, Isometry, AutoAlign, 3); run_vec(TV, float, Isometry, DontAlign, 3); run_vec(TV, float, Isometry, AutoAlign, 4); run_vec(TV, float, Isometry, DontAlign, 4); run_vec(TV, float, Projective, AutoAlign, 4); run_vec(TV, float, Projective, DontAlign, 4); run_vec(TV, double, Isometry, AutoAlign, 3); run_vec(TV, double, Isometry, DontAlign, 3); run_vec(TV, double, Isometry, AutoAlign, 4); run_vec(TV, double, Isometry, DontAlign, 4); run_vec(TV, double, Projective, AutoAlign, 4); run_vec(TV, double, Projective, DontAlign, 4); cout << "vec = trans.matrix() * vec" << endl; run_vec(TMATV, float, Isometry, AutoAlign, 4); run_vec(TMATV, float, Isometry, DontAlign, 4); run_vec(TMATV, double, Isometry, AutoAlign, 4); run_vec(TMATV, double, Isometry, DontAlign, 4); cout << "trans = trans1 * trans" << endl; run_trans(TV, float, Isometry, AutoAlign); run_trans(TV, float, Isometry, DontAlign); run_trans(TV, double, Isometry, AutoAlign); run_trans(TV, double, Isometry, DontAlign); run_trans(TV, float, Projective, AutoAlign); run_trans(TV, float, Projective, DontAlign); run_trans(TV, double, Projective, AutoAlign); run_trans(TV, double, Projective, DontAlign); cout << "trans = trans1.matrix() * trans.matrix()" << endl; run_trans(TMATVMAT, float, Isometry, AutoAlign); run_trans(TMATVMAT, float, Isometry, DontAlign); run_trans(TMATVMAT, double, Isometry, AutoAlign); run_trans(TMATVMAT, double, Isometry, DontAlign); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchVecAdd.cpp ================================================ #include #include #include using namespace Eigen; #ifndef SIZE #define SIZE 50 #endif #ifndef REPEAT #define REPEAT 10000 #endif typedef float Scalar; __attribute__ ((noinline)) void benchVec(Scalar* a, Scalar* b, Scalar* c, int size); __attribute__ ((noinline)) void benchVec(MatrixXf& a, MatrixXf& b, 
MatrixXf& c); __attribute__ ((noinline)) void benchVec(VectorXf& a, VectorXf& b, VectorXf& c); int main(int argc, char* argv[]) { int size = SIZE * 8; int size2 = size * size; Scalar* a = internal::aligned_new(size2); Scalar* b = internal::aligned_new(size2+4)+1; Scalar* c = internal::aligned_new(size2); for (int i=0; i2 ; --innersize) { if (size2%innersize==0) { int outersize = size2/innersize; MatrixXf ma = Map(a, innersize, outersize ); MatrixXf mb = Map(b, innersize, outersize ); MatrixXf mc = Map(c, innersize, outersize ); timer.reset(); for (int k=0; k<3; ++k) { timer.start(); benchVec(ma, mb, mc); timer.stop(); } std::cout << innersize << " x " << outersize << " " << timer.value() << "s " << (double(size2*REPEAT)/timer.value())/(1024.*1024.*1024.) << " GFlops\n"; } } VectorXf va = Map(a, size2); VectorXf vb = Map(b, size2); VectorXf vc = Map(c, size2); timer.reset(); for (int k=0; k<3; ++k) { timer.start(); benchVec(va, vb, vc); timer.stop(); } std::cout << timer.value() << "s " << (double(size2*REPEAT)/timer.value())/(1024.*1024.*1024.) 
<< " GFlops\n"; return 0; } void benchVec(MatrixXf& a, MatrixXf& b, MatrixXf& c) { for (int k=0; k::type PacketScalar; const int PacketSize = internal::packet_traits::size; PacketScalar a0, a1, a2, a3, b0, b1, b2, b3; for (int k=0; k // -DSCALARA=double or -DSCALARB=double // -DHAVE_BLAS // -DDECOUPLED // #include #include #include using namespace std; using namespace Eigen; #ifndef SCALAR // #define SCALAR std::complex #define SCALAR float #endif #ifndef SCALARA #define SCALARA SCALAR #endif #ifndef SCALARB #define SCALARB SCALAR #endif #ifdef ROWMAJ_A const int opt_A = RowMajor; #else const int opt_A = ColMajor; #endif #ifdef ROWMAJ_B const int opt_B = RowMajor; #else const int opt_B = ColMajor; #endif typedef SCALAR Scalar; typedef NumTraits::Real RealScalar; typedef Matrix A; typedef Matrix B; typedef Matrix C; typedef Matrix M; #ifdef HAVE_BLAS extern "C" { #include } static float fone = 1; static float fzero = 0; static double done = 1; static double szero = 0; static std::complex cfone = 1; static std::complex cfzero = 0; static std::complex cdone = 1; static std::complex cdzero = 0; static char notrans = 'N'; static char trans = 'T'; static char nonunit = 'N'; static char lower = 'L'; static char right = 'R'; static int intone = 1; #ifdef ROWMAJ_A const char transA = trans; #else const char transA = notrans; #endif #ifdef ROWMAJ_B const char transB = trans; #else const char transB = notrans; #endif template void blas_gemm(const A& a, const B& b, MatrixXf& c) { int M = c.rows(); int N = c.cols(); int K = a.cols(); int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows(); sgemm_(&transA,&transB,&M,&N,&K,&fone, const_cast(a.data()),&lda, const_cast(b.data()),&ldb,&fone, c.data(),&ldc); } template void blas_gemm(const A& a, const B& b, MatrixXd& c) { int M = c.rows(); int N = c.cols(); int K = a.cols(); int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows(); dgemm_(&transA,&transB,&M,&N,&K,&done, const_cast(a.data()),&lda, 
const_cast(b.data()),&ldb,&done, c.data(),&ldc); } template void blas_gemm(const A& a, const B& b, MatrixXcf& c) { int M = c.rows(); int N = c.cols(); int K = a.cols(); int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows(); cgemm_(&transA,&transB,&M,&N,&K,(float*)&cfone, const_cast((const float*)a.data()),&lda, const_cast((const float*)b.data()),&ldb,(float*)&cfone, (float*)c.data(),&ldc); } template void blas_gemm(const A& a, const B& b, MatrixXcd& c) { int M = c.rows(); int N = c.cols(); int K = a.cols(); int lda = a.outerStride(); int ldb = b.outerStride(); int ldc = c.rows(); zgemm_(&transA,&transB,&M,&N,&K,(double*)&cdone, const_cast((const double*)a.data()),&lda, const_cast((const double*)b.data()),&ldb,(double*)&cdone, (double*)c.data(),&ldc); } #endif void matlab_cplx_cplx(const M& ar, const M& ai, const M& br, const M& bi, M& cr, M& ci) { cr.noalias() += ar * br; cr.noalias() -= ai * bi; ci.noalias() += ar * bi; ci.noalias() += ai * br; // [cr ci] += [ar ai] * br + [-ai ar] * bi } void matlab_real_cplx(const M& a, const M& br, const M& bi, M& cr, M& ci) { cr.noalias() += a * br; ci.noalias() += a * bi; } void matlab_cplx_real(const M& ar, const M& ai, const M& b, M& cr, M& ci) { cr.noalias() += ar * b; ci.noalias() += ai * b; } template EIGEN_DONT_INLINE void gemm(const A& a, const B& b, C& c) { c.noalias() += a * b; } int main(int argc, char ** argv) { std::ptrdiff_t l1 = internal::queryL1CacheSize(); std::ptrdiff_t l2 = internal::queryTopLevelCacheSize(); std::cout << "L1 cache size = " << (l1>0 ? l1/1024 : -1) << " KB\n"; std::cout << "L2/L3 cache size = " << (l2>0 ? 
l2/1024 : -1) << " KB\n"; typedef internal::gebp_traits Traits; std::cout << "Register blocking = " << Traits::mr << " x " << Traits::nr << "\n"; int rep = 1; // number of repetitions per try int tries = 2; // number of tries, we keep the best int s = 2048; int m = s; int n = s; int p = s; int cache_size1=-1, cache_size2=l2, cache_size3 = 0; bool need_help = false; for (int i=1; i -c -t -p \n"; std::cout << " : size\n"; std::cout << " : rows columns depth\n"; return 1; } #if EIGEN_VERSION_AT_LEAST(3,2,90) if(cache_size1>0) setCpuCacheSizes(cache_size1,cache_size2,cache_size3); #endif A a(m,p); a.setRandom(); B b(p,n); b.setRandom(); C c(m,n); c.setOnes(); C rc = c; std::cout << "Matrix sizes = " << m << "x" << p << " * " << p << "x" << n << "\n"; std::ptrdiff_t mc(m), nc(n), kc(p); internal::computeProductBlockingSizes(kc, mc, nc); std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << " x " << nc << "\n"; C r = c; // check the parallel product is correct #if defined EIGEN_HAS_OPENMP Eigen::initParallel(); int procs = omp_get_max_threads(); if(procs>1) { #ifdef HAVE_BLAS blas_gemm(a,b,r); #else omp_set_num_threads(1); r.noalias() += a * b; omp_set_num_threads(procs); #endif c.noalias() += a * b; if(!r.isApprox(c)) std::cerr << "Warning, your parallel product is crap!\n\n"; } #elif defined HAVE_BLAS blas_gemm(a,b,r); c.noalias() += a * b; if(!r.isApprox(c)) { std::cout << (r - c).norm()/r.norm() << "\n"; std::cerr << "Warning, your product is crap!\n\n"; } #else if(1.*m*n*p<2000.*2000*2000) { gemm(a,b,c); r.noalias() += a.cast() .lazyProduct( b.cast() ); if(!r.isApprox(c)) { std::cout << (r - c).norm()/r.norm() << "\n"; std::cerr << "Warning, your product is crap!\n\n"; } } #endif #ifdef HAVE_BLAS BenchTimer tblas; c = rc; BENCH(tblas, tries, rep, blas_gemm(a,b,c)); std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n"; std::cout << "blas 
real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n"; #endif // warm start if(b.norm()+a.norm()==123.554) std::cout << "\n"; BenchTimer tmt; c = rc; BENCH(tmt, tries, rep, gemm(a,b,c)); std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n"; std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n"; #ifdef EIGEN_HAS_OPENMP if(procs>1) { BenchTimer tmono; omp_set_num_threads(1); Eigen::setNbThreads(1); c = rc; BENCH(tmono, tries, rep, gemm(a,b,c)); std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n"; std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n"; std::cout << "mt speed up x" << tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER) << " => " << (100.0*tmono.best(CPU_TIMER) / tmt.best(REAL_TIMER))/procs << "%\n"; } #endif if(1.*m*n*p<30*30*30) { BenchTimer tmt; c = rc; BENCH(tmt, tries, rep, c.noalias()+=a.lazyProduct(b)); std::cout << "lazy cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << "s)\n"; std::cout << "lazy real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n"; } #ifdef DECOUPLED if((NumTraits::IsComplex) && (NumTraits::IsComplex)) { M ar(m,p); ar.setRandom(); M ai(m,p); ai.setRandom(); M br(p,n); br.setRandom(); M bi(p,n); bi.setRandom(); M cr(m,n); cr.setRandom(); M ci(m,n); ci.setRandom(); BenchTimer t; BENCH(t, tries, 
rep, matlab_cplx_cplx(ar,ai,br,bi,cr,ci)); std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n"; std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n"; } if((!NumTraits::IsComplex) && (NumTraits::IsComplex)) { M a(m,p); a.setRandom(); M br(p,n); br.setRandom(); M bi(p,n); bi.setRandom(); M cr(m,n); cr.setRandom(); M ci(m,n); ci.setRandom(); BenchTimer t; BENCH(t, tries, rep, matlab_real_cplx(a,br,bi,cr,ci)); std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n"; std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n"; } if((NumTraits::IsComplex) && (!NumTraits::IsComplex)) { M ar(m,p); ar.setRandom(); M ai(m,p); ai.setRandom(); M b(p,n); b.setRandom(); M cr(m,n); cr.setRandom(); M ci(m,n); ci.setRandom(); BenchTimer t; BENCH(t, tries, rep, matlab_cplx_real(ar,ai,b,cr,ci)); std::cout << "\"matlab\" cpu " << t.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << t.total(CPU_TIMER) << "s)\n"; std::cout << "\"matlab\" real " << t.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/t.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << t.total(REAL_TIMER) << "s)\n"; } #endif return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_move_semantics.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2020 Sebastien Boisvert // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "BenchTimer.h" #include "../test/MovableScalar.h" #include #include #include template void copy_matrix(MatrixType& m) { MatrixType tmp(m); m = tmp; } template void move_matrix(MatrixType&& m) { MatrixType tmp(std::move(m)); m = std::move(tmp); } template void bench(const std::string& label) { using MatrixType = Eigen::Matrix,1,10>; Eigen::BenchTimer t; int tries = 10; int rep = 1000000; MatrixType data = MatrixType::Random().eval(); MatrixType dest; BENCH(t, tries, rep, copy_matrix(data)); std::cout << label << " copy semantics: " << 1e3*t.best(Eigen::CPU_TIMER) << " ms" << std::endl; BENCH(t, tries, rep, move_matrix(std::move(data))); std::cout << label << " move semantics: " << 1e3*t.best(Eigen::CPU_TIMER) << " ms" << std::endl; } int main() { bench("float"); bench("double"); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_multi_compilers.sh ================================================ #!/bin/bash if (($# < 2)); then echo "Usage: $0 compilerlist.txt benchfile.cpp" else compilerlist=$1 benchfile=$2 g=0 source $compilerlist # for each compiler, compile benchfile and run the benchmark for (( i=0 ; i /dev/null echo "" else echo "compiler not found: $compiler" fi done fi ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_norm.cpp ================================================ #include #include #include #include "BenchTimer.h" using namespace Eigen; using namespace std; template EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(T& v) { return v.norm(); } template EIGEN_DONT_INLINE typename T::Scalar stableNorm(T& v) { return v.stableNorm(); } template EIGEN_DONT_INLINE typename T::Scalar hypotNorm(T& v) { return v.hypotNorm(); } template EIGEN_DONT_INLINE typename T::Scalar blueNorm(T& v) { return 
v.blueNorm(); } template EIGEN_DONT_INLINE typename T::Scalar lapackNorm(T& v) { typedef typename T::Scalar Scalar; int n = v.size(); Scalar scale = 0; Scalar ssq = 1; for (int i=0;i= ax) { ssq += numext::abs2(ax/scale); } else { ssq = Scalar(1) + ssq * numext::abs2(scale/ax); scale = ax; } } return scale * std::sqrt(ssq); } template EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v) { typedef typename T::Scalar Scalar; Scalar s = v.array().abs().maxCoeff(); return s*(v/s).norm(); } template EIGEN_DONT_INLINE typename T::Scalar bl2passNorm(T& v) { return v.stableNorm(); } template EIGEN_DONT_INLINE typename T::Scalar divacNorm(T& v) { int n =v.size() / 2; for (int i=0;i0) { for (int i=0;i EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) { #ifndef EIGEN_VECTORIZE return v.blueNorm(); #else typedef typename T::Scalar Scalar; static int nmax = 0; static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr; int n; if(nmax <= 0) { int nbig, ibeta, it, iemin, iemax, iexp; Scalar abig, eps; nbig = NumTraits::highest(); // largest integer ibeta = std::numeric_limits::radix; // NumTraits::Base; // base for floating-point numbers it = NumTraits::digits(); // NumTraits::Mantissa; // number of base-beta digits in mantissa iemin = NumTraits::min_exponent(); // minimum exponent iemax = NumTraits::max_exponent(); // maximum exponent rbig = NumTraits::highest(); // largest floating-point number // Check the basic machine-dependent constants. 
if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5) || (it<=4 && ibeta <= 3 ) || it<2) { eigen_assert(false && "the algorithm cannot be guaranteed on this computer"); } iexp = -((1-iemin)/2); b1 = std::pow(ibeta, iexp); // lower boundary of midrange iexp = (iemax + 1 - it)/2; b2 = std::pow(ibeta,iexp); // upper boundary of midrange iexp = (2-iemin)/2; s1m = std::pow(ibeta,iexp); // scaling factor for lower range iexp = - ((iemax+it)/2); s2m = std::pow(ibeta,iexp); // scaling factor for upper range overfl = rbig*s2m; // overflow boundary for abig eps = std::pow(ibeta, 1-it); relerr = std::sqrt(eps); // tolerance for neglecting asml abig = 1.0/eps - 1.0; if (Scalar(nbig)>abig) nmax = abig; // largest safe n else nmax = nbig; } typedef typename internal::packet_traits::type Packet; const int ps = internal::packet_traits::size; Packet pasml = internal::pset1(Scalar(0)); Packet pamed = internal::pset1(Scalar(0)); Packet pabig = internal::pset1(Scalar(0)); Packet ps2m = internal::pset1(s2m); Packet ps1m = internal::pset1(s1m); Packet pb2 = internal::pset1(b2); Packet pb1 = internal::pset1(b1); for(int j=0; j(j)); Packet ax_s2m = internal::pmul(ax,ps2m); Packet ax_s1m = internal::pmul(ax,ps1m); Packet maskBig = internal::plt(pb2,ax); Packet maskSml = internal::plt(ax,pb1); // Packet maskMed = internal::pand(maskSml,maskBig); // Packet scale = internal::pset1(Scalar(0)); // scale = internal::por(scale, internal::pand(maskBig,ps2m)); // scale = internal::por(scale, internal::pand(maskSml,ps1m)); // scale = internal::por(scale, internal::pandnot(internal::pset1(Scalar(1)),maskMed)); // ax = internal::pmul(ax,scale); // ax = internal::pmul(ax,ax); // pabig = internal::padd(pabig, internal::pand(maskBig, ax)); // pasml = internal::padd(pasml, internal::pand(maskSml, ax)); // pamed = internal::padd(pamed, internal::pandnot(ax,maskMed)); pabig = internal::padd(pabig, internal::pand(maskBig, internal::pmul(ax_s2m,ax_s2m))); pasml = internal::padd(pasml, 
internal::pand(maskSml, internal::pmul(ax_s1m,ax_s1m))); pamed = internal::padd(pamed, internal::pandnot(internal::pmul(ax,ax),internal::pand(maskSml,maskBig))); } Scalar abig = internal::predux(pabig); Scalar asml = internal::predux(pasml); Scalar amed = internal::predux(pamed); if(abig > Scalar(0)) { abig = std::sqrt(abig); if(abig > overfl) { eigen_assert(false && "overflow"); return rbig; } if(amed > Scalar(0)) { abig = abig/s2m; amed = std::sqrt(amed); } else { return abig/s2m; } } else if(asml > Scalar(0)) { if (amed > Scalar(0)) { abig = std::sqrt(amed); amed = std::sqrt(asml) / s1m; } else { return std::sqrt(asml)/s1m; } } else { return std::sqrt(amed); } asml = std::min(abig, amed); abig = std::max(abig, amed); if(asml <= abig*relerr) return abig; else return abig * std::sqrt(Scalar(1) + numext::abs2(asml/abig)); #endif } #define BENCH_PERF(NRM) { \ float af = 0; double ad = 0; std::complex ac = 0; \ Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\ for (int k=0; k()); double yd = based * std::abs(internal::random()); VectorXf vf = VectorXf::Ones(s) * yf; VectorXd vd = VectorXd::Ones(s) * yd; std::cout << "reference\t" << std::sqrt(double(s))*yf << "\t" << std::sqrt(double(s))*yd << "\n"; std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n"; std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n"; std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n"; std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\n"; std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\n"; std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\n"; std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\n"; } void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s) { VectorXf vf(s); VectorXd vd(s); for (int i=0; i()) * std::pow(double(10), internal::random(ef0,ef1)); vd[i] = 
std::abs(internal::random()) * std::pow(double(10), internal::random(ed0,ed1)); } //std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n"; std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\t" << sqsumNorm(vf.cast()) << "\t" << sqsumNorm(vd.cast()) << "\n"; std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\t" << hypotNorm(vf.cast()) << "\t" << hypotNorm(vd.cast()) << "\n"; std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\t" << blueNorm(vf.cast()) << "\t" << blueNorm(vd.cast()) << "\n"; std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\t" << blueNorm(vf.cast()) << "\t" << blueNorm(vd.cast()) << "\n"; std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\t" << lapackNorm(vf.cast()) << "\t" << lapackNorm(vd.cast()) << "\n"; std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\t" << twopassNorm(vf.cast()) << "\t" << twopassNorm(vd.cast()) << "\n"; // std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\t" << bl2passNorm(vf.cast()) << "\t" << bl2passNorm(vd.cast()) << "\n"; } int main(int argc, char** argv) { int tries = 10; int iters = 100000; double y = 1.1345743233455785456788e12 * internal::random(); VectorXf v = VectorXf::Ones(1024) * y; // return 0; int s = 10000; double basef_ok = 1.1345743233455785456788e15; double based_ok = 1.1345743233455785456788e95; double basef_under = 1.1345743233455785456788e-27; double based_under = 1.1345743233455785456788e-303; double basef_over = 1.1345743233455785456788e+27; double based_over = 1.1345743233455785456788e+302; std::cout.precision(20); std::cerr << "\nNo under/overflow:\n"; check_accuracy(basef_ok, based_ok, s); std::cerr << "\nUnderflow:\n"; check_accuracy(basef_under, based_under, s); std::cerr << "\nOverflow:\n"; check_accuracy(basef_over, based_over, s); std::cerr << "\nVarying (over):\n"; for 
(int k=0; k<1; ++k) { check_accuracy_var(20,27,190,302,s); std::cout << "\n"; } std::cerr << "\nVarying (under):\n"; for (int k=0; k<1; ++k) { check_accuracy_var(-27,20,-302,-190,s); std::cout << "\n"; } y = 1; std::cout.precision(4); int s1 = 1024*1024*32; std::cerr << "Performance (out of cache, " << s1 << "):\n"; { int iters = 1; VectorXf vf = VectorXf::Random(s1) * y; VectorXd vd = VectorXd::Random(s1) * y; VectorXcf vcf = VectorXcf::Random(s1) * y; BENCH_PERF(sqsumNorm); BENCH_PERF(stableNorm); BENCH_PERF(blueNorm); BENCH_PERF(pblueNorm); BENCH_PERF(lapackNorm); BENCH_PERF(hypotNorm); BENCH_PERF(twopassNorm); BENCH_PERF(bl2passNorm); } std::cerr << "\nPerformance (in cache, " << 512 << "):\n"; { int iters = 100000; VectorXf vf = VectorXf::Random(512) * y; VectorXd vd = VectorXd::Random(512) * y; VectorXcf vcf = VectorXcf::Random(512) * y; BENCH_PERF(sqsumNorm); BENCH_PERF(stableNorm); BENCH_PERF(blueNorm); BENCH_PERF(pblueNorm); BENCH_PERF(lapackNorm); BENCH_PERF(hypotNorm); BENCH_PERF(twopassNorm); BENCH_PERF(bl2passNorm); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_reverse.cpp ================================================ #include #include #include using namespace Eigen; #ifndef REPEAT #define REPEAT 100000 #endif #ifndef TRIES #define TRIES 20 #endif typedef double Scalar; template __attribute__ ((noinline)) void bench_reverse(const MatrixType& m) { int rows = m.rows(); int cols = m.cols(); int size = m.size(); int repeats = (REPEAT*1000)/size; MatrixType a = MatrixType::Random(rows,cols); MatrixType b = MatrixType::Random(rows,cols); BenchTimer timerB, timerH, timerV; Scalar acc = 0; int r = internal::random(0,rows-1); int c = internal::random(0,cols-1); for (int t=0; t0; ++i) { bench_reverse(Matrix(dynsizes[i],dynsizes[i])); bench_reverse(Matrix(dynsizes[i]*dynsizes[i])); } // bench_reverse(Matrix()); // bench_reverse(Matrix()); // bench_reverse(Matrix()); // 
bench_reverse(Matrix()); // bench_reverse(Matrix()); // bench_reverse(Matrix()); // bench_reverse(Matrix()); // bench_reverse(Matrix()); // bench_reverse(Matrix()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_sum.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { typedef Matrix Vec; Vec v(SIZE); v.setZero(); v[0] = 1; v[1] = 2; for(int i = 0; i < 1000000; i++) { v.coeffRef(0) += v.sum() * SCALAR(1e-20); } cout << v.sum() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/bench_unrolling ================================================ #!/bin/bash # gcc : CXX="g++ -finline-limit=10000 -ftemplate-depth-2000 --param max-inline-recursive-depth=2000" # icc : CXX="icpc -fast -no-inline-max-size -fno-exceptions" CXX=${CXX-g++ -finline-limit=10000 -ftemplate-depth-2000 --param max-inline-recursive-depth=2000} # default value for ((i=1; i<16; ++i)); do echo "Matrix size: $i x $i :" $CXX -O3 -I.. -DNDEBUG benchmark.cpp -DMATSIZE=$i -DEIGEN_UNROLLING_LIMIT=400 -o benchmark && time ./benchmark >/dev/null $CXX -O3 -I.. -DNDEBUG -finline-limit=10000 benchmark.cpp -DMATSIZE=$i -DEIGEN_DONT_USE_UNROLLED_LOOPS=1 -o benchmark && time ./benchmark >/dev/null echo " " done ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmark-blocking-sizes.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include #include #include #include #include #include #include bool eigen_use_specific_block_size; int eigen_block_size_k, eigen_block_size_m, eigen_block_size_n; #define EIGEN_TEST_SPECIFIC_BLOCKING_SIZES eigen_use_specific_block_size #define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K eigen_block_size_k #define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M eigen_block_size_m #define EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N eigen_block_size_n #include #include using namespace Eigen; using namespace std; static BenchTimer timer; // how many times we repeat each measurement. // measurements are randomly shuffled - we're not doing // all N identical measurements in a row. const int measurement_repetitions = 3; // Timings below this value are too short to be accurate, // we'll repeat measurements with more iterations until // we get a timing above that threshold. const float min_accurate_time = 1e-2f; // See --min-working-set-size command line parameter. size_t min_working_set_size = 0; float max_clock_speed = 0.0f; // range of sizes that we will benchmark (in all 3 K,M,N dimensions) const size_t maxsize = 2048; const size_t minsize = 16; typedef MatrixXf MatrixType; typedef MatrixType::Scalar Scalar; typedef internal::packet_traits::type Packet; static_assert((maxsize & (maxsize - 1)) == 0, "maxsize must be a power of two"); static_assert((minsize & (minsize - 1)) == 0, "minsize must be a power of two"); static_assert(maxsize > minsize, "maxsize must be larger than minsize"); static_assert(maxsize < (minsize << 16), "maxsize must be less than (minsize<<16)"); // just a helper to store a triple of K,M,N sizes for matrix product struct size_triple_t { size_t k, m, n; size_triple_t() : k(0), m(0), n(0) {} size_triple_t(size_t _k, size_t _m, size_t _n) : k(_k), m(_m), n(_n) {} size_triple_t(const size_triple_t& o) : k(o.k), m(o.m), n(o.n) {} size_triple_t(uint16_t compact) { k = 1 << ((compact & 0xf00) >> 8); m = 1 << ((compact & 0x0f0) >> 4); n = 1 << ((compact & 0x00f) >> 0); } }; uint8_t 
log2_pot(size_t x) { size_t l = 0; while (x >>= 1) l++; return l; } // Convert between size tripes and a compact form fitting in 12 bits // where each size, which must be a POT, is encoded as its log2, on 4 bits // so the largest representable size is 2^15 == 32k ... big enough. uint16_t compact_size_triple(size_t k, size_t m, size_t n) { return (log2_pot(k) << 8) | (log2_pot(m) << 4) | log2_pot(n); } uint16_t compact_size_triple(const size_triple_t& t) { return compact_size_triple(t.k, t.m, t.n); } // A single benchmark. Initially only contains benchmark params. // Then call run(), which stores the result in the gflops field. struct benchmark_t { uint16_t compact_product_size; uint16_t compact_block_size; bool use_default_block_size; float gflops; benchmark_t() : compact_product_size(0) , compact_block_size(0) , use_default_block_size(false) , gflops(0) { } benchmark_t(size_t pk, size_t pm, size_t pn, size_t bk, size_t bm, size_t bn) : compact_product_size(compact_size_triple(pk, pm, pn)) , compact_block_size(compact_size_triple(bk, bm, bn)) , use_default_block_size(false) , gflops(0) {} benchmark_t(size_t pk, size_t pm, size_t pn) : compact_product_size(compact_size_triple(pk, pm, pn)) , compact_block_size(0) , use_default_block_size(true) , gflops(0) {} void run(); }; ostream& operator<<(ostream& s, const benchmark_t& b) { s << hex << b.compact_product_size << dec; if (b.use_default_block_size) { size_triple_t t(b.compact_product_size); Index k = t.k, m = t.m, n = t.n; internal::computeProductBlockingSizes(k, m, n); s << " default(" << k << ", " << m << ", " << n << ")"; } else { s << " " << hex << b.compact_block_size << dec; } s << " " << b.gflops; return s; } // We sort first by increasing benchmark parameters, // then by decreasing performance. 
bool operator<(const benchmark_t& b1, const benchmark_t& b2) { return b1.compact_product_size < b2.compact_product_size || (b1.compact_product_size == b2.compact_product_size && ( (b1.compact_block_size < b2.compact_block_size || ( b1.compact_block_size == b2.compact_block_size && b1.gflops > b2.gflops)))); } void benchmark_t::run() { size_triple_t productsizes(compact_product_size); if (use_default_block_size) { eigen_use_specific_block_size = false; } else { // feed eigen with our custom blocking params eigen_use_specific_block_size = true; size_triple_t blocksizes(compact_block_size); eigen_block_size_k = blocksizes.k; eigen_block_size_m = blocksizes.m; eigen_block_size_n = blocksizes.n; } // set up the matrix pool const size_t combined_three_matrices_sizes = sizeof(Scalar) * (productsizes.k * productsizes.m + productsizes.k * productsizes.n + productsizes.m * productsizes.n); // 64 M is large enough that nobody has a cache bigger than that, // while still being small enough that everybody has this much RAM, // so conveniently we don't need to special-case platforms here. const size_t unlikely_large_cache_size = 64 << 20; const size_t working_set_size = min_working_set_size ? 
min_working_set_size : unlikely_large_cache_size; const size_t matrix_pool_size = 1 + working_set_size / combined_three_matrices_sizes; MatrixType *lhs = new MatrixType[matrix_pool_size]; MatrixType *rhs = new MatrixType[matrix_pool_size]; MatrixType *dst = new MatrixType[matrix_pool_size]; for (size_t i = 0; i < matrix_pool_size; i++) { lhs[i] = MatrixType::Zero(productsizes.m, productsizes.k); rhs[i] = MatrixType::Zero(productsizes.k, productsizes.n); dst[i] = MatrixType::Zero(productsizes.m, productsizes.n); } // main benchmark loop int iters_at_a_time = 1; float time_per_iter = 0.0f; size_t matrix_index = 0; while (true) { double starttime = timer.getCpuTime(); for (int i = 0; i < iters_at_a_time; i++) { dst[matrix_index].noalias() = lhs[matrix_index] * rhs[matrix_index]; matrix_index++; if (matrix_index == matrix_pool_size) { matrix_index = 0; } } double endtime = timer.getCpuTime(); const float timing = float(endtime - starttime); if (timing >= min_accurate_time) { time_per_iter = timing / iters_at_a_time; break; } iters_at_a_time *= 2; } delete[] lhs; delete[] rhs; delete[] dst; gflops = 2e-9 * productsizes.k * productsizes.m * productsizes.n / time_per_iter; } void print_cpuinfo() { #ifdef __linux__ cout << "contents of /proc/cpuinfo:" << endl; string line; ifstream cpuinfo("/proc/cpuinfo"); if (cpuinfo.is_open()) { while (getline(cpuinfo, line)) { cout << line << endl; } cpuinfo.close(); } cout << endl; #elif defined __APPLE__ cout << "output of sysctl hw:" << endl; system("sysctl hw"); cout << endl; #endif } template string type_name() { return "unknown"; } template<> string type_name() { return "float"; } template<> string type_name() { return "double"; } struct action_t { virtual const char* invokation_name() const { abort(); return nullptr; } virtual void run() const { abort(); } virtual ~action_t() {} }; void show_usage_and_exit(int /*argc*/, char* argv[], const vector>& available_actions) { cerr << "usage: " << argv[0] << " [options...]" << endl << 
endl; cerr << "available actions:" << endl << endl; for (auto it = available_actions.begin(); it != available_actions.end(); ++it) { cerr << " " << (*it)->invokation_name() << endl; } cerr << endl; cerr << "options:" << endl << endl; cerr << " --min-working-set-size=N:" << endl; cerr << " Set the minimum working set size to N bytes." << endl; cerr << " This is rounded up as needed to a multiple of matrix size." << endl; cerr << " A larger working set lowers the chance of a warm cache." << endl; cerr << " The default value 0 means use a large enough working" << endl; cerr << " set to likely outsize caches." << endl; cerr << " A value of 1 (that is, 1 byte) would mean don't do anything to" << endl; cerr << " avoid warm caches." << endl; exit(1); } float measure_clock_speed() { cerr << "Measuring clock speed... \r" << flush; vector all_gflops; for (int i = 0; i < 8; i++) { benchmark_t b(1024, 1024, 1024); b.run(); all_gflops.push_back(b.gflops); } sort(all_gflops.begin(), all_gflops.end()); float stable_estimate = all_gflops[2] + all_gflops[3] + all_gflops[4] + all_gflops[5]; // multiply by an arbitrary constant to discourage trying doing anything with the // returned values besides just comparing them with each other. 
float result = stable_estimate * 123.456f; return result; } struct human_duration_t { int seconds; human_duration_t(int s) : seconds(s) {} }; ostream& operator<<(ostream& s, const human_duration_t& d) { int remainder = d.seconds; if (remainder > 3600) { int hours = remainder / 3600; s << hours << " h "; remainder -= hours * 3600; } if (remainder > 60) { int minutes = remainder / 60; s << minutes << " min "; remainder -= minutes * 60; } if (d.seconds < 600) { s << remainder << " s"; } return s; } const char session_filename[] = "/data/local/tmp/benchmark-blocking-sizes-session.data"; void serialize_benchmarks(const char* filename, const vector& benchmarks, size_t first_benchmark_to_run) { FILE* file = fopen(filename, "w"); if (!file) { cerr << "Could not open file " << filename << " for writing." << endl; cerr << "Do you have write permissions on the current working directory?" << endl; exit(1); } size_t benchmarks_vector_size = benchmarks.size(); fwrite(&max_clock_speed, sizeof(max_clock_speed), 1, file); fwrite(&benchmarks_vector_size, sizeof(benchmarks_vector_size), 1, file); fwrite(&first_benchmark_to_run, sizeof(first_benchmark_to_run), 1, file); fwrite(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file); fclose(file); } bool deserialize_benchmarks(const char* filename, vector& benchmarks, size_t& first_benchmark_to_run) { FILE* file = fopen(filename, "r"); if (!file) { return false; } if (1 != fread(&max_clock_speed, sizeof(max_clock_speed), 1, file)) { return false; } size_t benchmarks_vector_size = 0; if (1 != fread(&benchmarks_vector_size, sizeof(benchmarks_vector_size), 1, file)) { return false; } if (1 != fread(&first_benchmark_to_run, sizeof(first_benchmark_to_run), 1, file)) { return false; } benchmarks.resize(benchmarks_vector_size); if (benchmarks.size() != fread(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file)) { return false; } unlink(filename); return true; } void try_run_some_benchmarks( vector& benchmarks, double 
time_start, size_t& first_benchmark_to_run) { if (first_benchmark_to_run == benchmarks.size()) { return; } double time_last_progress_update = 0; double time_last_clock_speed_measurement = 0; double time_now = 0; size_t benchmark_index = first_benchmark_to_run; while (true) { float ratio_done = float(benchmark_index) / benchmarks.size(); time_now = timer.getRealTime(); // We check clock speed every minute and at the end. if (benchmark_index == benchmarks.size() || time_now > time_last_clock_speed_measurement + 60.0f) { time_last_clock_speed_measurement = time_now; // Ensure that clock speed is as expected float current_clock_speed = measure_clock_speed(); // The tolerance needs to be smaller than the relative difference between // clock speeds that a device could operate under. // It seems unlikely that a device would be throttling clock speeds by // amounts smaller than 2%. // With a value of 1%, I was getting within noise on a Sandy Bridge. const float clock_speed_tolerance = 0.02f; if (current_clock_speed > (1 + clock_speed_tolerance) * max_clock_speed) { // Clock speed is now higher than we previously measured. // Either our initial measurement was inaccurate, which won't happen // too many times as we are keeping the best clock speed value and // and allowing some tolerance; or something really weird happened, // which invalidates all benchmark results collected so far. // Either way, we better restart all over again now. if (benchmark_index) { cerr << "Restarting at " << 100.0f * ratio_done << " % because clock speed increased. " << endl; } max_clock_speed = current_clock_speed; first_benchmark_to_run = 0; return; } bool rerun_last_tests = false; if (current_clock_speed < (1 - clock_speed_tolerance) * max_clock_speed) { cerr << "Measurements completed so far: " << 100.0f * ratio_done << " % " << endl; cerr << "Clock speed seems to be only " << current_clock_speed/max_clock_speed << " times what it used to be." 
<< endl; unsigned int seconds_to_sleep_if_lower_clock_speed = 1; while (current_clock_speed < (1 - clock_speed_tolerance) * max_clock_speed) { if (seconds_to_sleep_if_lower_clock_speed > 32) { cerr << "Sleeping longer probably won't make a difference." << endl; cerr << "Serializing benchmarks to " << session_filename << endl; serialize_benchmarks(session_filename, benchmarks, first_benchmark_to_run); cerr << "Now restart this benchmark, and it should pick up where we left." << endl; exit(2); } rerun_last_tests = true; cerr << "Sleeping " << seconds_to_sleep_if_lower_clock_speed << " s... \r" << endl; sleep(seconds_to_sleep_if_lower_clock_speed); current_clock_speed = measure_clock_speed(); seconds_to_sleep_if_lower_clock_speed *= 2; } } if (rerun_last_tests) { cerr << "Redoing the last " << 100.0f * float(benchmark_index - first_benchmark_to_run) / benchmarks.size() << " % because clock speed had been low. " << endl; return; } // nothing wrong with the clock speed so far, so there won't be a need to rerun // benchmarks run so far in case we later encounter a lower clock speed. first_benchmark_to_run = benchmark_index; } if (benchmark_index == benchmarks.size()) { // We're done! first_benchmark_to_run = benchmarks.size(); // Erase progress info cerr << " " << endl; return; } // Display progress info on stderr if (time_now > time_last_progress_update + 1.0f) { time_last_progress_update = time_now; cerr << "Measurements... " << 100.0f * ratio_done << " %, ETA " << human_duration_t(float(time_now - time_start) * (1.0f - ratio_done) / ratio_done) << " \r" << flush; } // This is where we actually run a benchmark! 
benchmarks[benchmark_index].run(); benchmark_index++; } } void run_benchmarks(vector& benchmarks) { size_t first_benchmark_to_run; vector deserialized_benchmarks; bool use_deserialized_benchmarks = false; if (deserialize_benchmarks(session_filename, deserialized_benchmarks, first_benchmark_to_run)) { cerr << "Found serialized session with " << 100.0f * first_benchmark_to_run / deserialized_benchmarks.size() << " % already done" << endl; if (deserialized_benchmarks.size() == benchmarks.size() && first_benchmark_to_run > 0 && first_benchmark_to_run < benchmarks.size()) { use_deserialized_benchmarks = true; } } if (use_deserialized_benchmarks) { benchmarks = deserialized_benchmarks; } else { // not using deserialized benchmarks, starting from scratch first_benchmark_to_run = 0; // Randomly shuffling benchmarks allows us to get accurate enough progress info, // as now the cheap/expensive benchmarks are randomly mixed so they average out. // It also means that if data is corrupted for some time span, the odds are that // not all repetitions of a given benchmark will be corrupted. random_shuffle(benchmarks.begin(), benchmarks.end()); } for (int i = 0; i < 4; i++) { max_clock_speed = max(max_clock_speed, measure_clock_speed()); } double time_start = 0.0; while (first_benchmark_to_run < benchmarks.size()) { if (first_benchmark_to_run == 0) { time_start = timer.getRealTime(); } try_run_some_benchmarks(benchmarks, time_start, first_benchmark_to_run); } // Sort timings by increasing benchmark parameters, and decreasing gflops. // The latter is very important. It means that we can ignore all but the first // benchmark with given parameters. sort(benchmarks.begin(), benchmarks.end()); // Collect best (i.e. now first) results for each parameter values. 
vector best_benchmarks; for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) { if (best_benchmarks.empty() || best_benchmarks.back().compact_product_size != it->compact_product_size || best_benchmarks.back().compact_block_size != it->compact_block_size) { best_benchmarks.push_back(*it); } } // keep and return only the best benchmarks benchmarks = best_benchmarks; } struct measure_all_pot_sizes_action_t : action_t { virtual const char* invokation_name() const { return "all-pot-sizes"; } virtual void run() const { vector benchmarks; for (int repetition = 0; repetition < measurement_repetitions; repetition++) { for (size_t ksize = minsize; ksize <= maxsize; ksize *= 2) { for (size_t msize = minsize; msize <= maxsize; msize *= 2) { for (size_t nsize = minsize; nsize <= maxsize; nsize *= 2) { for (size_t kblock = minsize; kblock <= ksize; kblock *= 2) { for (size_t mblock = minsize; mblock <= msize; mblock *= 2) { for (size_t nblock = minsize; nblock <= nsize; nblock *= 2) { benchmarks.emplace_back(ksize, msize, nsize, kblock, mblock, nblock); } } } } } } } run_benchmarks(benchmarks); cout << "BEGIN MEASUREMENTS ALL POT SIZES" << endl; for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) { cout << *it << endl; } } }; struct measure_default_sizes_action_t : action_t { virtual const char* invokation_name() const { return "default-sizes"; } virtual void run() const { vector benchmarks; for (int repetition = 0; repetition < measurement_repetitions; repetition++) { for (size_t ksize = minsize; ksize <= maxsize; ksize *= 2) { for (size_t msize = minsize; msize <= maxsize; msize *= 2) { for (size_t nsize = minsize; nsize <= maxsize; nsize *= 2) { benchmarks.emplace_back(ksize, msize, nsize); } } } } run_benchmarks(benchmarks); cout << "BEGIN MEASUREMENTS DEFAULT SIZES" << endl; for (auto it = benchmarks.begin(); it != benchmarks.end(); ++it) { cout << *it << endl; } } }; int main(int argc, char* argv[]) { double time_start = timer.getRealTime(); 
cout.precision(4); cerr.precision(4); vector> available_actions; available_actions.emplace_back(new measure_all_pot_sizes_action_t); available_actions.emplace_back(new measure_default_sizes_action_t); auto action = available_actions.end(); if (argc <= 1) { show_usage_and_exit(argc, argv, available_actions); } for (auto it = available_actions.begin(); it != available_actions.end(); ++it) { if (!strcmp(argv[1], (*it)->invokation_name())) { action = it; break; } } if (action == available_actions.end()) { show_usage_and_exit(argc, argv, available_actions); } for (int i = 2; i < argc; i++) { if (argv[i] == strstr(argv[i], "--min-working-set-size=")) { const char* equals_sign = strchr(argv[i], '='); min_working_set_size = strtoul(equals_sign+1, nullptr, 10); } else { cerr << "unrecognized option: " << argv[i] << endl << endl; show_usage_and_exit(argc, argv, available_actions); } } print_cpuinfo(); cout << "benchmark parameters:" << endl; cout << "pointer size: " << 8*sizeof(void*) << " bits" << endl; cout << "scalar type: " << type_name() << endl; cout << "packet size: " << internal::packet_traits::size << endl; cout << "minsize = " << minsize << endl; cout << "maxsize = " << maxsize << endl; cout << "measurement_repetitions = " << measurement_repetitions << endl; cout << "min_accurate_time = " << min_accurate_time << endl; cout << "min_working_set_size = " << min_working_set_size; if (min_working_set_size == 0) { cout << " (try to outsize caches)"; } cout << endl << endl; (*action)->run(); double time_end = timer.getRealTime(); cerr << "Finished in " << human_duration_t(time_end - time_start) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmark.cpp ================================================ // g++ -O3 -DNDEBUG -DMATSIZE= benchmark.cpp -o benchmark && time ./benchmark #include #include #ifndef MATSIZE #define MATSIZE 3 #endif using namespace std; using namespace Eigen; #ifndef REPEAT 
#define REPEAT 40000000 #endif #ifndef SCALAR #define SCALAR double #endif int main(int argc, char *argv[]) { Matrix I = Matrix::Ones(); Matrix m; for(int i = 0; i < MATSIZE; i++) for(int j = 0; j < MATSIZE; j++) { m(i,j) = (i+MATSIZE*j); } asm("#begin"); for(int a = 0; a < REPEAT; a++) { m = Matrix::Ones() + 0.00005 * (m + (m*m)); } asm("#end"); cout << m << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmarkSlice.cpp ================================================ // g++ -O3 -DNDEBUG benchmarkX.cpp -o benchmarkX && time ./benchmarkX #include #include using namespace std; using namespace Eigen; #ifndef REPEAT #define REPEAT 10000 #endif #ifndef SCALAR #define SCALAR float #endif int main(int argc, char *argv[]) { typedef Matrix Mat; Mat m(100, 100); m.setRandom(); for(int a = 0; a < REPEAT; a++) { int r, c, nr, nc; r = Eigen::internal::random(0,10); c = Eigen::internal::random(0,10); nr = Eigen::internal::random(50,80); nc = Eigen::internal::random(50,80); m.block(r,c,nr,nc) += Mat::Ones(nr,nc); m.block(r,c,nr,nc) *= SCALAR(10); m.block(r,c,nr,nc) -= Mat::constant(nr,nc,10); m.block(r,c,nr,nc) /= SCALAR(10); } cout << m[0] << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmarkX.cpp ================================================ // g++ -fopenmp -I .. 
-O3 -DNDEBUG -finline-limit=1000 benchmarkX.cpp -o b && time ./b #include #include using namespace std; using namespace Eigen; #ifndef MATTYPE #define MATTYPE MatrixXLd #endif #ifndef MATSIZE #define MATSIZE 400 #endif #ifndef REPEAT #define REPEAT 100 #endif int main(int argc, char *argv[]) { MATTYPE I = MATTYPE::Ones(MATSIZE,MATSIZE); MATTYPE m(MATSIZE,MATSIZE); for(int i = 0; i < MATSIZE; i++) for(int j = 0; j < MATSIZE; j++) { m(i,j) = (i+j+1)/(MATSIZE*MATSIZE); } for(int a = 0; a < REPEAT; a++) { m = I + 0.0001 * (m + m*m); } cout << m(0,0) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmarkXcwise.cpp ================================================ // g++ -O3 -DNDEBUG benchmarkX.cpp -o benchmarkX && time ./benchmarkX #include #include using namespace std; using namespace Eigen; #ifndef VECTYPE #define VECTYPE VectorXLd #endif #ifndef VECSIZE #define VECSIZE 1000000 #endif #ifndef REPEAT #define REPEAT 1000 #endif int main(int argc, char *argv[]) { VECTYPE I = VECTYPE::Ones(VECSIZE); VECTYPE m(VECSIZE,1); for(int i = 0; i < VECSIZE; i++) { m[i] = 0.1 * i/VECSIZE; } for(int a = 0; a < REPEAT; a++) { m = VECTYPE::Ones(VECSIZE) + 0.00005 * (m.cwise().square() + m/4); } cout << m[0] << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/benchmark_suite ================================================ #!/bin/bash CXX=${CXX-g++} # default value unless caller has defined CXX echo "Fixed size 3x3, column-major, -DNDEBUG" $CXX -O3 -I .. -DNDEBUG benchmark.cpp -o benchmark && time ./benchmark >/dev/null echo "Fixed size 3x3, column-major, with asserts" $CXX -O3 -I .. benchmark.cpp -o benchmark && time ./benchmark >/dev/null echo "Fixed size 3x3, row-major, -DNDEBUG" $CXX -O3 -I .. 
-DEIGEN_DEFAULT_TO_ROW_MAJOR -DNDEBUG benchmark.cpp -o benchmark && time ./benchmark >/dev/null echo "Fixed size 3x3, row-major, with asserts" $CXX -O3 -I .. -DEIGEN_DEFAULT_TO_ROW_MAJOR benchmark.cpp -o benchmark && time ./benchmark >/dev/null echo "Dynamic size 20x20, column-major, -DNDEBUG" $CXX -O3 -I .. -DNDEBUG benchmarkX.cpp -o benchmarkX && time ./benchmarkX >/dev/null echo "Dynamic size 20x20, column-major, with asserts" $CXX -O3 -I .. benchmarkX.cpp -o benchmarkX && time ./benchmarkX >/dev/null echo "Dynamic size 20x20, row-major, -DNDEBUG" $CXX -O3 -I .. -DEIGEN_DEFAULT_TO_ROW_MAJOR -DNDEBUG benchmarkX.cpp -o benchmarkX && time ./benchmarkX >/dev/null echo "Dynamic size 20x20, row-major, with asserts" $CXX -O3 -I .. -DEIGEN_DEFAULT_TO_ROW_MAJOR benchmarkX.cpp -o benchmarkX && time ./benchmarkX >/dev/null ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/COPYING ================================================ GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. 
This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/README ================================================ Bench Template Library **************************************** Introduction : The aim of this project is to compare the performance of available numerical libraries. The code is designed as generic and modular as possible. Thus, adding new numerical libraries or new numerical tests should require minimal effort. ***************************************** Installation : BTL uses cmake / ctest: 1 - create a build directory: $ mkdir build $ cd build 2 - configure: $ ccmake .. 
3 - run the bench using ctest:

  $ ctest -V

You can run the benchmarks only on libraries matching a given regular
expression:

  ctest -V -R <regexp>

For instance:

  ctest -V -R eigen2

You can also select a given set of actions by defining the environment
variable BTL_CONFIG this way:

  BTL_CONFIG="-a action1{:action2}*" ctest -V

An example:

  BTL_CONFIG="-a axpy:vector_matrix:trisolve:ata" ctest -V -R eigen2

Finally, if bench results already exist (the bench*.dat files) then they
are merged by keeping the best result for each matrix size. If you want to
overwrite the previous ones you can simply add the "--overwrite" option:

  BTL_CONFIG="-a axpy:vector_matrix:trisolve:ata --overwrite" ctest -V -R eigen2

4 : Analyze the results. Different data files (.dat) are produced in each
libs directory. If gnuplot is available, choose a directory name in the
data directory to store the results and type:

  $ cd data
  $ mkdir my_directory
  $ cp ../libs/*/*.dat my_directory

Build the data utilities in this (data) directory:

  make

Then you can look at the raw data,

  go_mean my_directory

or smooth the data first:

  smooth_all.sh my_directory
  go_mean my_directory_smooth

*************************************************

Files and directories :

 generic_bench : all the bench sources common to all libraries

 actions : sources for the different action wrappers (axpy, matrix-matrix
           product) to be tested.

 libs/* : bench sources specific to each tested library.

 machine_dep : directory used to store machine specific Makefile.in

 data : directory used to store gnuplot scripts and data analysis utilities

**************************************************

Principles : the code modularity is achieved by defining two concepts :

 ****** Action concept : This is a class defining which kind of test must
 be performed (e.g. a matrix_vector_product).
An Action should define the following methods :

 *** Ctor using the size of the problem (matrix or vector size) as an
 argument:
   Action action(size);

 *** initialize : this method initializes the calculation (e.g. initializes
 the matrix and vector arguments):
   action.initialize();

 *** calculate : this method actually launches the calculation to be
 benchmarked:
   action.calculate();

 *** nb_op_base() : this method returns the complexity of the calculate
 method (allowing the mflops evaluation)

 *** name() : this method returns the name of the action (std::string)

****** Interface concept : This is a class or namespace defining how to use
a given library and its specific containers (matrix and vector). Up to now
an interface should define the following types :

 *** real_type : kind of float to be used (float or double)

 *** stl_vector : must correspond to std::vector<real_type>

 *** stl_matrix : must correspond to std::vector<stl_vector>

 *** gene_vector : the vector type for this interface --> e.g.
 (real_type *) for the C_interface

 *** gene_matrix : the matrix type for this interface --> e.g.
 (gene_vector *) for the C_interface

+ the following common methods :

 *** free_matrix(gene_matrix & A, int N)
     deallocation of an N sized gene_matrix A

 *** free_vector(gene_vector & B)
     deallocation of an N sized gene_vector B

 *** matrix_from_stl(gene_matrix & A, stl_matrix & A_stl)
     copy the content of an stl_matrix A_stl into a gene_matrix A.
     The allocation of A is done in this function.

 *** vector_from_stl(gene_vector & B, stl_vector & B_stl)
     copy the content of an stl_vector B_stl into a gene_vector B.
     The allocation of B is done in this function.

 *** matrix_to_stl(gene_matrix & A, stl_matrix & A_stl)
     copy the content of a gene_matrix A into an stl_matrix A_stl.
     The size of A_stl must correspond to the size of A.

 *** vector_to_stl(gene_vector & A, stl_vector & A_stl)
     copy the content of a gene_vector A into an stl_vector A_stl.
     The size of A_stl must correspond to the size of A.
 *** copy_matrix(gene_matrix & source, gene_matrix & cible, int N) : copy
     the content of source in cible. Both source and cible must be sized NxN.

 *** copy_vector(gene_vector & source, gene_vector & cible, int N) : copy
     the content of source in cible. Both source and cible must be sized N.

and the following methods corresponding to the action one wants to be
benchmarked :

 *** matrix_vector_product(const gene_matrix & A, const gene_vector & B,
     gene_vector & X, int N)

 *** matrix_matrix_product(const gene_matrix & A, const gene_matrix & B,
     gene_matrix & X, int N)

 *** ata_product(const gene_matrix & A, gene_matrix & X, int N)

 *** aat_product(const gene_matrix & A, gene_matrix & X, int N)

 *** axpy(real coef, const gene_vector & X, gene_vector & Y, int N)

The bench algorithm (generic_bench/bench.hh) is templated with an action
itself templated with an interface. A typical main.cpp source stored in a
given library directory libs/A_LIB looks like :

  bench< AN_ACTION < AN_INTERFACE > >( 10 , 1000 , 50 ) ;

this function will produce an XY data file containing measured mflops as a
function of the size, for 50 sizes between 10 and 1000.

This algorithm can be adapted by providing a given Perf_Analyzer object
which determines how the time measurements must be done. For example, the
X86_Perf_Analyzer uses the asm rdtsc function and provides a very fast and
accurate (but less portable) timing method. The default is the
Portable_Perf_Analyzer, so

  bench< AN_ACTION < AN_INTERFACE > >( 10 , 1000 , 50 ) ;

is equivalent to

  bench< Portable_Perf_Analyzer, AN_ACTION < AN_INTERFACE > >( 10 , 1000 , 50 ) ;

If your system supports it we suggest to use a mixed implementation
(X86_Perf_Analyzer + Portable_Perf_Analyzer): replace

  bench<Portable_Perf_Analyzer,Action>(size_min,size_max,nb_point);

with

  bench<Mixed_Perf_Analyzer,Action>(size_min,size_max,nb_point);

in generic/bench.hh .
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_aat_product.hh ================================================ //===================================================== // File : action_aat_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_AAT_PRODUCT #define ACTION_AAT_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_aat_product { public : // Ctor Action_aat_product( int size ):_size(size) { MESSAGE("Action_aat_product Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_matrix(X_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(A,A_stl); Interface::matrix_from_stl(X,X_stl); } // invalidate copy ctor Action_aat_product( const Action_aat_product & ) { INFOS("illegal call to Action_aat_product Copy Ctor"); exit(0); } // Dtor ~Action_aat_product( void ){ MESSAGE("Action_aat_product Dtor"); // deallocation Interface::free_matrix(A,_size); Interface::free_matrix(X,_size); Interface::free_matrix(A_ref,_size); Interface::free_matrix(X_ref,_size); } // action name static inline std::string name( void ) { return "aat_"+Interface::name(); } double nb_op_base( void ){ return double(_size)*double(_size)*double(_size); } inline void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::aat_product(A,X,_size); } void check_result( void ){ if (_size>128) return; // calculation check Interface::matrix_to_stl(X,resu_stl); STL_interface::aat_product(A_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(1); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_matrix X_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix 
X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_ata_product.hh ================================================ //===================================================== // File : action_ata_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_ATA_PRODUCT #define ACTION_ATA_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_ata_product { public : // Ctor Action_ata_product( int size ):_size(size) { MESSAGE("Action_ata_product Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_matrix(X_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(A,A_stl); Interface::matrix_from_stl(X,X_stl); } // invalidate copy ctor Action_ata_product( const Action_ata_product & ) { INFOS("illegal call to Action_ata_product Copy Ctor"); exit(0); } // Dtor ~Action_ata_product( void ){ MESSAGE("Action_ata_product Dtor"); // deallocation Interface::free_matrix(A,_size); Interface::free_matrix(X,_size); Interface::free_matrix(A_ref,_size); Interface::free_matrix(X_ref,_size); } // action name static inline std::string name( void ) { return "ata_"+Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size*_size; } inline void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::ata_product(A,X,_size); } void check_result( void ){ if (_size>128) return; // calculation check Interface::matrix_to_stl(X,resu_stl); STL_interface::ata_product(A_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(1); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_matrix X_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix X; int _size; }; 
#endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_atv_product.hh ================================================ //===================================================== // File : action_atv_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_ATV_PRODUCT #define ACTION_ATV_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_atv_product { public : Action_atv_product( int size ) : _size(size) { MESSAGE("Action_atv_product Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(X_ref,X_stl); Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X,X_stl); } // invalidate copy ctor Action_atv_product( const Action_atv_product & ) { INFOS("illegal call to Action_atv_product Copy Ctor"); exit(1); } ~Action_atv_product( void ) { MESSAGE("Action_atv_product Dtor"); Interface::free_matrix(A,_size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_matrix(A_ref,_size); Interface::free_vector(B_ref); Interface::free_vector(X_ref); } static inline std::string name() { return "atv_" + Interface::name(); } double nb_op_base( void ) { return 2.0*_size*_size; } inline void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); Interface::copy_vector(X_ref,X,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("begin atv"); Interface::atv_product(A,B,X,_size); BTL_ASM_COMMENT("end atv"); } void check_result( void ) { if (_size>128) return; Interface::vector_to_stl(X,resu_stl); STL_interface::atv_product(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(1); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename 
Interface::stl_vector X_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector X_ref; typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_axpby.hh ================================================ //===================================================== // File : action_axpby.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_AXPBY #define ACTION_AXPBY #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_axpby { public : // Ctor Action_axpby( int size ):_alpha(0.5),_beta(0.95),_size(size) { MESSAGE("Action_axpby Ctor"); // STL vector initialization init_vector(X_stl,_size); init_vector(Y_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(Y_ref,Y_stl); Interface::vector_from_stl(X,X_stl); Interface::vector_from_stl(Y,Y_stl); } // invalidate copy ctor Action_axpby( const Action_axpby & ) { INFOS("illegal call to Action_axpby Copy Ctor"); exit(1); } // Dtor ~Action_axpby( void ){ MESSAGE("Action_axpby Dtor"); // deallocation Interface::free_vector(X_ref); Interface::free_vector(Y_ref); Interface::free_vector(X); Interface::free_vector(Y); } // action name static inline std::string name( void ) { return "axpby_"+Interface::name(); } double nb_op_base( void ){ return 3.0*_size; } inline void initialize( void ){ Interface::copy_vector(X_ref,X,_size); Interface::copy_vector(Y_ref,Y,_size); } inline void calculate( void ) { BTL_ASM_COMMENT("mybegin axpby"); Interface::axpby(_alpha,X,_beta,Y,_size); BTL_ASM_COMMENT("myend axpby"); } void check_result( void ){ if (_size>128) return; // calculation check Interface::vector_to_stl(Y,resu_stl); STL_interface::axpby(_alpha,X_stl,_beta,Y_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(Y_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(2); } } private : typename Interface::stl_vector X_stl; typename Interface::stl_vector Y_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_vector X_ref; typename Interface::gene_vector Y_ref; typename Interface::gene_vector X; typename Interface::gene_vector Y; typename 
Interface::real_type _alpha; typename Interface::real_type _beta; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_axpy.hh ================================================ //===================================================== // File : action_axpy.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_AXPY #define ACTION_AXPY #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_axpy { public : // Ctor Action_axpy( int size ):_coef(1.0),_size(size) { MESSAGE("Action_axpy Ctor"); // STL vector initialization init_vector(X_stl,_size); init_vector(Y_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(Y_ref,Y_stl); Interface::vector_from_stl(X,X_stl); Interface::vector_from_stl(Y,Y_stl); } // invalidate copy ctor Action_axpy( const Action_axpy & ) { INFOS("illegal call to Action_axpy Copy Ctor"); exit(1); } // Dtor ~Action_axpy( void ){ MESSAGE("Action_axpy Dtor"); // deallocation Interface::free_vector(X_ref); Interface::free_vector(Y_ref); Interface::free_vector(X); Interface::free_vector(Y); } // action name static inline std::string name( void ) { return "axpy_"+Interface::name(); } double nb_op_base( void ){ return 2.0*_size; } inline void initialize( void ){ Interface::copy_vector(X_ref,X,_size); Interface::copy_vector(Y_ref,Y,_size); } inline void calculate( void ) { BTL_ASM_COMMENT("mybegin axpy"); Interface::axpy(_coef,X,Y,_size); BTL_ASM_COMMENT("myend axpy"); } void check_result( void ){ if (_size>128) return; // calculation check Interface::vector_to_stl(Y,resu_stl); STL_interface::axpy(_coef,X_stl,Y_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(Y_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(0); } } private : typename Interface::stl_vector X_stl; typename Interface::stl_vector Y_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_vector X_ref; typename Interface::gene_vector Y_ref; typename Interface::gene_vector X; typename Interface::gene_vector Y; typename Interface::real_type _coef; int _size; }; #endif 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_cholesky.hh ================================================ //===================================================== // File : action_cholesky.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_CHOLESKY #define ACTION_CHOLESKY #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_cholesky { public : // Ctor Action_cholesky( int size ):_size(size) { MESSAGE("Action_cholesky Ctor"); // STL mat/vec initialization init_matrix_symm(X_stl,_size); init_matrix(C_stl,_size); // make sure X is invertible for (int i=0; i<_size; ++i) X_stl[i][i] = std::abs(X_stl[i][i]) * 1e2 + 100; // generic matrix and vector initialization Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(X,X_stl); Interface::matrix_from_stl(C,C_stl); _cost = 0; for (int j=0; j<_size; ++j) { double r = std::max(_size - j -1,0); _cost += 2*(r*j+r+j); } } // invalidate copy ctor Action_cholesky( const Action_cholesky & ) { INFOS("illegal call to Action_cholesky Copy Ctor"); exit(1); } // Dtor ~Action_cholesky( void ){ MESSAGE("Action_cholesky Dtor"); // deallocation Interface::free_matrix(X_ref,_size); Interface::free_matrix(X,_size); Interface::free_matrix(C,_size); } // action name static inline std::string name( void ) { return "cholesky_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::cholesky(X,C,_size); } void check_result( void ){ // calculation check // STL_interface::cholesky(X_stl,C_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(C_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_matrix X_stl; typename Interface::stl_matrix C_stl; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix X; typename Interface::gene_matrix C; int _size; double _cost; }; #endif ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_ger.hh ================================================ // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_GER #define ACTION_GER #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_ger { public : // Ctor BTL_DONT_INLINE Action_ger( int size ):_size(size) { MESSAGE("Action_ger Ctor"); // STL matrix and vector initialization typename Interface::stl_matrix tmp; init_matrix(A_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(X,X_stl); } // invalidate copy ctor Action_ger( const Action_ger & ) { INFOS("illegal call to Action_ger Copy Ctor"); exit(1); } // Dtor BTL_DONT_INLINE ~Action_ger( void ){ MESSAGE("Action_ger Dtor"); Interface::free_matrix(A,_size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_matrix(A_ref,_size); Interface::free_vector(B_ref); 
Interface::free_vector(X_ref); } // action name static inline std::string name( void ) { return "ger_" + Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size; } BTL_DONT_INLINE void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); Interface::copy_vector(X_ref,X,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("#begin ger"); Interface::ger(A,B,X,_size); BTL_ASM_COMMENT("end ger"); } BTL_DONT_INLINE void check_result( void ){ // calculation check Interface::vector_to_stl(X,resu_stl); STL_interface::ger(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-3){ INFOS("WRONG CALCULATION...residual=" << error); // exit(0); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename Interface::stl_vector X_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector X_ref; typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_hessenberg.hh ================================================ //===================================================== // File : action_hessenberg.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_HESSENBERG #define ACTION_HESSENBERG #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_hessenberg { public : // Ctor Action_hessenberg( int size ):_size(size) { MESSAGE("Action_hessenberg Ctor"); // STL vector initialization init_matrix(X_stl,_size); init_matrix(C_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(X,X_stl); Interface::matrix_from_stl(C,C_stl); _cost = 0; for (int j=0; j<_size-2; ++j) { double r = std::max(0,_size-j-1); double b = std::max(0,_size-j-2); _cost += 6 + 3*b + r*r*4 + r*_size*4; } } // invalidate copy ctor Action_hessenberg( const Action_hessenberg & ) { INFOS("illegal call to Action_hessenberg Copy Ctor"); exit(1); } // Dtor ~Action_hessenberg( void ){ MESSAGE("Action_hessenberg Dtor"); // deallocation Interface::free_matrix(X_ref,_size); Interface::free_matrix(X,_size); Interface::free_matrix(C,_size); } // action name static inline std::string name( void ) { return "hessenberg_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::hessenberg(X,C,_size); } void check_result( void ){ // calculation check Interface::matrix_to_stl(C,resu_stl); // STL_interface::hessenberg(X_stl,C_stl,_size); // // 
typename Interface::real_type error= // STL_interface::norm_diff(C_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_matrix X_stl; typename Interface::stl_matrix C_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix X; typename Interface::gene_matrix C; int _size; double _cost; }; template class Action_tridiagonalization { public : // Ctor Action_tridiagonalization( int size ):_size(size) { MESSAGE("Action_tridiagonalization Ctor"); // STL vector initialization init_matrix(X_stl,_size); for(int i=0; i<_size; ++i) { for(int j=0; j(C_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(X,X_stl); Interface::matrix_from_stl(C,C_stl); _cost = 0; for (int j=0; j<_size-2; ++j) { double r = std::max(0,_size-j-1); double b = std::max(0,_size-j-2); _cost += 6. 
+ 3.*b + r*r*8.; } } // invalidate copy ctor Action_tridiagonalization( const Action_tridiagonalization & ) { INFOS("illegal call to Action_tridiagonalization Copy Ctor"); exit(1); } // Dtor ~Action_tridiagonalization( void ){ MESSAGE("Action_tridiagonalization Dtor"); // deallocation Interface::free_matrix(X_ref,_size); Interface::free_matrix(X,_size); Interface::free_matrix(C,_size); } // action name static inline std::string name( void ) { return "tridiagonalization_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::tridiagonalization(X,C,_size); } void check_result( void ){ // calculation check Interface::matrix_to_stl(C,resu_stl); // STL_interface::tridiagonalization(X_stl,C_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(C_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_matrix X_stl; typename Interface::stl_matrix C_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix X; typename Interface::gene_matrix C; int _size; double _cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_lu_decomp.hh ================================================ //===================================================== // File : action_lu_decomp.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_LU_DECOMP #define ACTION_LU_DECOMP #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_lu_decomp { public : // Ctor Action_lu_decomp( int size ):_size(size) { MESSAGE("Action_lu_decomp Ctor"); // STL vector initialization init_matrix(X_stl,_size); init_matrix(C_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(X,X_stl); Interface::matrix_from_stl(C,C_stl); _cost = 2.0*size*size*size/3.0 + size*size; } // invalidate copy ctor Action_lu_decomp( const Action_lu_decomp & ) { INFOS("illegal call to Action_lu_decomp Copy Ctor"); exit(1); } // Dtor ~Action_lu_decomp( void ){ MESSAGE("Action_lu_decomp Dtor"); // deallocation Interface::free_matrix(X_ref,_size); Interface::free_matrix(X,_size); Interface::free_matrix(C,_size); } // action name static inline std::string name( void ) { return "complete_lu_decomp_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::lu_decomp(X,C,_size); } void check_result( void ){ // calculation check Interface::matrix_to_stl(C,resu_stl); // STL_interface::lu_decomp(X_stl,C_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(C_stl,resu_stl); // // if (error>1.e-6){ // 
INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_matrix X_stl; typename Interface::stl_matrix C_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix X; typename Interface::gene_matrix C; int _size; double _cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_lu_solve.hh ================================================ //===================================================== // File : action_lu_solve.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_LU_SOLVE #define ACTION_LU_SOLVE #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_lu_solve { public : static inline std::string name( void ) { return "lu_solve_"+Interface::name(); } static double nb_op_base(int size){ return 2.0*size*size*size/3.0; // questionable but not really important } static double calculate( int nb_calc, int size ) { // STL matrix and vector initialization typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename Interface::stl_vector X_stl; init_matrix(A_stl,size); init_vector(B_stl,size); init_vector(X_stl,size); // generic matrix and vector initialization typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; typename Interface::gene_matrix LU; Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X,X_stl); Interface::matrix_from_stl(LU,A_stl); // local variable : typename Interface::Pivot_Vector pivot; // pivot vector Interface::new_Pivot_Vector(pivot,size); // timer utilities Portable_Timer chronos; // time measurement chronos.start(); for (int ii=0;ii::matrix_vector_product(A_stl,X_stl,B_new_stl,size); typename Interface::real_type error= STL_interface::norm_diff(B_stl,B_new_stl); if (error>1.e-5){ INFOS("WRONG CALCULATION...residual=" << error); STL_interface::display_vector(B_stl); STL_interface::display_vector(B_new_stl); exit(0); } // deallocation and return time Interface::free_matrix(A,size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_Pivot_Vector(pivot); return time; } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_matrix_matrix_product.hh ================================================ 
//===================================================== // File : action_matrix_matrix_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_MATRIX_MATRIX_PRODUCT #define ACTION_MATRIX_MATRIX_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_matrix_matrix_product { public : // Ctor Action_matrix_matrix_product( int size ):_size(size) { MESSAGE("Action_matrix_matrix_product Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_matrix(B_stl,_size); init_matrix(X_stl,_size); init_matrix(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(B_ref,B_stl); Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(A,A_stl); Interface::matrix_from_stl(B,B_stl); Interface::matrix_from_stl(X,X_stl); } // invalidate copy ctor Action_matrix_matrix_product( const Action_matrix_matrix_product & ) { INFOS("illegal call to Action_matrix_matrix_product Copy Ctor"); exit(0); } // Dtor ~Action_matrix_matrix_product( 
void ){ MESSAGE("Action_matrix_matrix_product Dtor"); // deallocation Interface::free_matrix(A,_size); Interface::free_matrix(B,_size); Interface::free_matrix(X,_size); Interface::free_matrix(A_ref,_size); Interface::free_matrix(B_ref,_size); Interface::free_matrix(X_ref,_size); } // action name static inline std::string name( void ) { return "matrix_matrix_"+Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size*_size; } inline void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_matrix(B_ref,B,_size); Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::matrix_matrix_product(A,B,X,_size); } void check_result( void ){ // calculation check if (_size<200) { Interface::matrix_to_stl(X,resu_stl); STL_interface::matrix_matrix_product(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(1); } } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_matrix B_stl; typename Interface::stl_matrix X_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_matrix B_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix B; typename Interface::gene_matrix X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_matrix_matrix_product_bis.hh ================================================ //===================================================== // File : action_matrix_matrix_product_bis.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_MATRIX_MATRIX_PRODUCT_BIS #define ACTION_MATRIX_MATRIX_PRODUCT_BIS #include "utilities.h" #include "STL_interface.hh" #include "STL_timer.hh" #include #include "init_function.hh" #include "init_vector.hh" #include "init_matrix.hh" using namespace std; template class Action_matrix_matrix_product_bis { public : static inline std::string name( void ) { return "matrix_matrix_"+Interface::name(); } static double nb_op_base(int size){ return 2.0*size*size*size; } static double calculate( int nb_calc, int size ) { // STL matrix and vector initialization typename Interface::stl_matrix A_stl; typename Interface::stl_matrix B_stl; typename Interface::stl_matrix X_stl; init_matrix(A_stl,size); init_matrix(B_stl,size); init_matrix(X_stl,size); // generic matrix and vector initialization typename Interface::gene_matrix A_ref; typename Interface::gene_matrix B_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix B; typename Interface::gene_matrix X; Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(B_ref,B_stl); Interface::matrix_from_stl(X_ref,X_stl); 
Interface::matrix_from_stl(A,A_stl); Interface::matrix_from_stl(B,B_stl); Interface::matrix_from_stl(X,X_stl); // STL_timer utilities STL_timer chronos; // Baseline evaluation chronos.start_baseline(nb_calc); do { Interface::copy_matrix(A_ref,A,size); Interface::copy_matrix(B_ref,B,size); Interface::copy_matrix(X_ref,X,size); // Interface::matrix_matrix_product(A,B,X,size); This line must be commented !!!! } while(chronos.check()); chronos.report(true); // Time measurement chronos.start(nb_calc); do { Interface::copy_matrix(A_ref,A,size); Interface::copy_matrix(B_ref,B,size); Interface::copy_matrix(X_ref,X,size); Interface::matrix_matrix_product(A,B,X,size); // here it is not commented !!!! } while(chronos.check()); chronos.report(true); double time=chronos.calculated_time/2000.0; // calculation check typename Interface::stl_matrix resu_stl(size); Interface::matrix_to_stl(X,resu_stl); STL_interface::matrix_matrix_product(A_stl,B_stl,X_stl,size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-6){ INFOS("WRONG CALCULATION...residual=" << error); exit(1); } // deallocation and return time Interface::free_matrix(A,size); Interface::free_matrix(B,size); Interface::free_matrix(X,size); Interface::free_matrix(A_ref,size); Interface::free_matrix(B_ref,size); Interface::free_matrix(X_ref,size); return time; } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_matrix_vector_product.hh ================================================ //===================================================== // File : action_matrix_vector_product.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_MATRIX_VECTOR_PRODUCT #define ACTION_MATRIX_VECTOR_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_matrix_vector_product { public : // Ctor BTL_DONT_INLINE Action_matrix_vector_product( int size ):_size(size) { MESSAGE("Action_matrix_vector_product Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(X,X_stl); } // invalidate copy ctor Action_matrix_vector_product( const Action_matrix_vector_product & ) { INFOS("illegal call to Action_matrix_vector_product Copy Ctor"); exit(1); } // Dtor BTL_DONT_INLINE ~Action_matrix_vector_product( void ){ MESSAGE("Action_matrix_vector_product Dtor"); // deallocation 
Interface::free_matrix(A,_size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_matrix(A_ref,_size); Interface::free_vector(B_ref); Interface::free_vector(X_ref); } // action name static inline std::string name( void ) { return "matrix_vector_" + Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size; } BTL_DONT_INLINE void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); Interface::copy_vector(X_ref,X,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("#begin matrix_vector_product"); Interface::matrix_vector_product(A,B,X,_size); BTL_ASM_COMMENT("end matrix_vector_product"); } BTL_DONT_INLINE void check_result( void ){ // calculation check Interface::vector_to_stl(X,resu_stl); STL_interface::matrix_vector_product(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-5){ INFOS("WRONG CALCULATION...residual=" << error); exit(0); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename Interface::stl_vector X_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector X_ref; typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_partial_lu.hh ================================================ //===================================================== // File : action_lu_decomp.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the 
License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_PARTIAL_LU #define ACTION_PARTIAL_LU #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_partial_lu { public : // Ctor Action_partial_lu( int size ):_size(size) { MESSAGE("Action_partial_lu Ctor"); // STL vector initialization init_matrix(X_stl,_size); init_matrix(C_stl,_size); // make sure X is invertible for (int i=0; i<_size; ++i) X_stl[i][i] = X_stl[i][i] * 1e2 + 1; // generic matrix and vector initialization Interface::matrix_from_stl(X_ref,X_stl); Interface::matrix_from_stl(X,X_stl); Interface::matrix_from_stl(C,C_stl); _cost = 2.0*size*size*size/3.0 + size*size; } // invalidate copy ctor Action_partial_lu( const Action_partial_lu & ) { INFOS("illegal call to Action_partial_lu Copy Ctor"); exit(1); } // Dtor ~Action_partial_lu( void ){ MESSAGE("Action_partial_lu Dtor"); // deallocation Interface::free_matrix(X_ref,_size); Interface::free_matrix(X,_size); Interface::free_matrix(C,_size); } // action name static inline std::string name( void ) { return "partial_lu_decomp_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ Interface::copy_matrix(X_ref,X,_size); } inline void calculate( void ) { Interface::partial_lu_decomp(X,C,_size); } void check_result( void ){ // calculation check // Interface::matrix_to_stl(C,resu_stl); // 
STL_interface::lu_decomp(X_stl,C_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(C_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_matrix X_stl; typename Interface::stl_matrix C_stl; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix X; typename Interface::gene_matrix C; int _size; double _cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_rot.hh ================================================ // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_ROT #define ACTION_ROT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_rot { public : // Ctor BTL_DONT_INLINE Action_rot( int size ):_size(size) { MESSAGE("Action_rot Ctor"); // STL matrix and vector initialization typename Interface::stl_matrix tmp; init_vector(A_stl,_size); init_vector(B_stl,_size); // generic matrix and vector initialization Interface::vector_from_stl(A_ref,A_stl); Interface::vector_from_stl(A,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(B,B_stl); } // invalidate copy ctor Action_rot( const Action_rot & ) { INFOS("illegal call to Action_rot Copy Ctor"); exit(1); } // Dtor BTL_DONT_INLINE ~Action_rot( void ){ MESSAGE("Action_rot Dtor"); Interface::free_vector(A); Interface::free_vector(B); Interface::free_vector(A_ref); Interface::free_vector(B_ref); } // action name static inline std::string name( void ) { return "rot_" + Interface::name(); } double nb_op_base( void ){ return 6.0*_size; } BTL_DONT_INLINE void initialize( void ){ Interface::copy_vector(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("#begin rot"); Interface::rot(A,B,0.5,0.6,_size); BTL_ASM_COMMENT("end rot"); } BTL_DONT_INLINE void check_result( void ){ // calculation check // Interface::vector_to_stl(X,resu_stl); // STL_interface::rot(A_stl,B_stl,X_stl,_size); // typename Interface::real_type error= // STL_interface::norm_diff(X_stl,resu_stl); // if (error>1.e-3){ // INFOS("WRONG CALCULATION...residual=" << error); // exit(0); // } } private : typename Interface::stl_vector A_stl; typename Interface::stl_vector B_stl; typename Interface::gene_vector A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector A; typename Interface::gene_vector B; int _size; }; #endif 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_symv.hh ================================================ //===================================================== // File : action_symv.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_SYMV #define ACTION_SYMV #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_symv { public : // Ctor BTL_DONT_INLINE Action_symv( int size ):_size(size) { MESSAGE("Action_symv Ctor"); // STL matrix and vector initialization init_matrix_symm(A_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(X,X_stl); } // invalidate copy ctor Action_symv( const Action_symv & ) { INFOS("illegal call to Action_symv Copy Ctor"); exit(1); } // Dtor BTL_DONT_INLINE ~Action_symv( void ){ Interface::free_matrix(A,_size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_matrix(A_ref,_size); Interface::free_vector(B_ref); Interface::free_vector(X_ref); } // action name static inline std::string name( void ) { return "symv_" + Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size; } BTL_DONT_INLINE void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); Interface::copy_vector(X_ref,X,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("#begin symv"); Interface::symv(A,B,X,_size); BTL_ASM_COMMENT("end symv"); } BTL_DONT_INLINE void check_result( void ){ if (_size>128) return; // calculation check Interface::vector_to_stl(X,resu_stl); STL_interface::symv(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-5){ INFOS("WRONG CALCULATION...residual=" << error); exit(0); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename 
Interface::stl_vector X_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector X_ref; typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_syr2.hh ================================================ //===================================================== // File : action_syr2.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_SYR2 #define ACTION_SYR2 #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_syr2 { public : // Ctor BTL_DONT_INLINE Action_syr2( int size ):_size(size) { // STL matrix and vector initialization typename Interface::stl_matrix tmp; init_matrix(A_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); init_vector(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(A_ref,A_stl); Interface::matrix_from_stl(A,A_stl); Interface::vector_from_stl(B_ref,B_stl); Interface::vector_from_stl(B,B_stl); Interface::vector_from_stl(X_ref,X_stl); Interface::vector_from_stl(X,X_stl); } // invalidate copy ctor Action_syr2( const Action_syr2 & ) { INFOS("illegal call to Action_syr2 Copy Ctor"); exit(1); } // Dtor BTL_DONT_INLINE ~Action_syr2( void ){ Interface::free_matrix(A,_size); Interface::free_vector(B); Interface::free_vector(X); Interface::free_matrix(A_ref,_size); Interface::free_vector(B_ref); Interface::free_vector(X_ref); } // action name static inline std::string name( void ) { return "syr2_" + Interface::name(); } double nb_op_base( void ){ return 2.0*_size*_size; } BTL_DONT_INLINE void initialize( void ){ Interface::copy_matrix(A_ref,A,_size); Interface::copy_vector(B_ref,B,_size); Interface::copy_vector(X_ref,X,_size); } BTL_DONT_INLINE void calculate( void ) { BTL_ASM_COMMENT("#begin syr2"); Interface::syr2(A,B,X,_size); BTL_ASM_COMMENT("end syr2"); } BTL_DONT_INLINE void check_result( void ){ // calculation check Interface::vector_to_stl(X,resu_stl); STL_interface::syr2(A_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-3){ INFOS("WRONG CALCULATION...residual=" << error); // exit(0); } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_vector B_stl; typename 
Interface::stl_vector X_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_vector B_ref; typename Interface::gene_vector X_ref; typename Interface::gene_matrix A; typename Interface::gene_vector B; typename Interface::gene_vector X; int _size; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_trisolve.hh ================================================ //===================================================== // File : action_trisolve.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_TRISOLVE #define ACTION_TRISOLVE #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_trisolve { public : // Ctor Action_trisolve( int size ):_size(size) { MESSAGE("Action_trisolve Ctor"); // STL vector initialization init_matrix(L_stl,_size); init_vector(B_stl,_size); init_vector(X_stl,_size); for (int j=0; j<_size; ++j) { for (int i=0; i(resu_stl,_size); // generic matrix and vector initialization Interface::matrix_from_stl(L,L_stl); Interface::vector_from_stl(X,X_stl); Interface::vector_from_stl(B,B_stl); _cost = 0; for (int j=0; j<_size; ++j) { _cost += 2*j + 1; } } // invalidate copy ctor Action_trisolve( const Action_trisolve & ) { INFOS("illegal call to Action_trisolve Copy Ctor"); exit(1); } // Dtor ~Action_trisolve( void ){ MESSAGE("Action_trisolve Dtor"); // deallocation Interface::free_matrix(L,_size); Interface::free_vector(B); Interface::free_vector(X); } // action name static inline std::string name( void ) { return "trisolve_vector_"+Interface::name(); } double nb_op_base( void ){ return _cost; } inline void initialize( void ){ //Interface::copy_vector(X_ref,X,_size); } inline void calculate( void ) { Interface::trisolve_lower(L,B,X,_size); } void check_result(){ if (_size>128) return; // calculation check Interface::vector_to_stl(X,resu_stl); STL_interface::trisolve_lower(L_stl,B_stl,X_stl,_size); typename Interface::real_type error= STL_interface::norm_diff(X_stl,resu_stl); if (error>1.e-4){ INFOS("WRONG CALCULATION...residual=" << error); exit(2); } //else INFOS("CALCULATION OK...residual=" << error); } private : typename Interface::stl_matrix L_stl; typename Interface::stl_vector X_stl; typename Interface::stl_vector B_stl; typename Interface::stl_vector resu_stl; typename Interface::gene_matrix L; typename Interface::gene_vector X; typename Interface::gene_vector B; int _size; double 
_cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_trisolve_matrix.hh ================================================ //===================================================== // File : action_matrix_matrix_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef ACTION_TRISOLVE_MATRIX_PRODUCT #define ACTION_TRISOLVE_MATRIX_PRODUCT #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_trisolve_matrix { public : // Ctor Action_trisolve_matrix( int size ):_size(size) { MESSAGE("Action_trisolve_matrix Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_matrix(B_stl,_size); init_matrix(X_stl,_size); init_matrix(resu_stl,_size); for (int j=0; j<_size; ++j) { for (int i=0; i::matrix_matrix_product(A_stl,B_stl,X_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(X_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // // exit(1); // } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_matrix B_stl; typename Interface::stl_matrix X_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_matrix B_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix B; typename Interface::gene_matrix X; int _size; double _cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/action_trmm.hh ================================================ //===================================================== // File : action_matrix_matrix_product.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef ACTION_TRMM #define ACTION_TRMM #include "utilities.h" #include "STL_interface.hh" #include #include "init/init_function.hh" #include "init/init_vector.hh" #include "init/init_matrix.hh" using namespace std; template class Action_trmm { public : // Ctor Action_trmm( int size ):_size(size) { MESSAGE("Action_trmm Ctor"); // STL matrix and vector initialization init_matrix(A_stl,_size); init_matrix(B_stl,_size); init_matrix(X_stl,_size); init_matrix(resu_stl,_size); for (int j=0; j<_size; ++j) { for (int i=0; i::matrix_matrix_product(A_stl,B_stl,X_stl,_size); // // typename Interface::real_type error= // STL_interface::norm_diff(X_stl,resu_stl); // // if (error>1.e-6){ // INFOS("WRONG CALCULATION...residual=" << error); // // exit(1); // } } private : typename Interface::stl_matrix A_stl; typename Interface::stl_matrix B_stl; typename Interface::stl_matrix X_stl; typename Interface::stl_matrix resu_stl; typename Interface::gene_matrix A_ref; typename Interface::gene_matrix B_ref; typename Interface::gene_matrix X_ref; typename Interface::gene_matrix A; typename Interface::gene_matrix B; typename Interface::gene_matrix X; int _size; double _cost; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/actions/basic_actions.hh ================================================ #include "action_axpy.hh" #include "action_axpby.hh" #include "action_matrix_vector_product.hh" #include "action_atv_product.hh" #include "action_matrix_matrix_product.hh" 
#include "action_ata_product.hh"
#include "action_aat_product.hh"

#include "action_trisolve.hh"
#include "action_trmm.hh"
#include "action_symv.hh"
// #include "action_symm.hh"
#include "action_syr2.hh"

#include "action_ger.hh"
#include "action_rot.hh"

// #include "action_lu_solve.hh"

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindACML.cmake ================================================
# NOTE(review): this module was collapsed onto a single physical line by the
# extraction; commands below are re-lined without token changes.

if (ACML_LIBRARIES)
  set(ACML_FIND_QUIETLY TRUE)
endif ()

# first try the multi-threaded ACML
find_library(ACML_LIBRARIES
  NAMES
  acml_mp acml_mv
  PATHS
  $ENV{ACMLDIR}/lib
  $ENV{ACML_DIR}/lib
  ${LIB_INSTALL_DIR}
)

find_file(ACML_LIBRARIES
  NAMES
  libacml_mp.so
  PATHS
  /usr/lib
  /usr/lib64
  $ENV{ACMLDIR}/lib
  ${LIB_INSTALL_DIR}
)

if(NOT ACML_LIBRARIES)
  message(STATUS "Multi-threaded library not found, looking for single-threaded")
  find_library(ACML_LIBRARIES
    NAMES
    acml acml_mv
    PATHS
    $ENV{ACMLDIR}/lib
    $ENV{ACML_DIR}/lib
    ${LIB_INSTALL_DIR}
  )
  find_file(ACML_LIBRARIES
    libacml.so libacml_mv.so
    PATHS
    /usr/lib
    /usr/lib64
    $ENV{ACMLDIR}/lib
    ${LIB_INSTALL_DIR}
  )
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ACML DEFAULT_MSG ACML_LIBRARIES)

mark_as_advanced(ACML_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindATLAS.cmake ================================================
if (ATLAS_LIBRARIES)
  set(ATLAS_FIND_QUIETLY TRUE)
endif ()

find_file(ATLAS_LIB libatlas.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
find_library(ATLAS_LIB satlas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})

find_file(ATLAS_LAPACK NAMES liblapack_atlas.so.3 liblapack.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
find_library(ATLAS_LAPACK NAMES lapack_atlas lapack PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})

find_file(ATLAS_F77BLAS libf77blas.so.3 PATHS /usr/lib /usr/lib/atlas /usr/lib64 /usr/lib64/atlas $ENV{ATLASDIR} ${LIB_INSTALL_DIR})
find_library(ATLAS_F77BLAS f77blas PATHS $ENV{ATLASDIR} ${LIB_INSTALL_DIR})

# NOTE(review): ATLAS_CBLAS is tested below but never searched for anywhere in
# this module, so the branch only fires when the user pre-sets it in the cache.
# Confirm against the upstream module whether a find call was lost.
if(ATLAS_LIB AND ATLAS_CBLAS AND ATLAS_LAPACK AND ATLAS_F77BLAS)

  set(ATLAS_LIBRARIES ${ATLAS_LAPACK} ${ATLAS_LIB})

  # search the default lapack lib link to it
  find_file(ATLAS_REFERENCE_LAPACK liblapack.so.3 PATHS /usr/lib /usr/lib64)
  find_library(ATLAS_REFERENCE_LAPACK NAMES lapack)
#   if(ATLAS_REFERENCE_LAPACK)
#     set(ATLAS_LIBRARIES ${ATLAS_LIBRARIES} ${ATLAS_REFERENCE_LAPACK})
#   endif()

endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ATLAS DEFAULT_MSG ATLAS_LIBRARIES)

mark_as_advanced(ATLAS_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindBLAZE.cmake ================================================
# - Try to find eigen2 headers
# Once done this will define
#
#  BLAZE_FOUND - system has blaze lib
#  BLAZE_INCLUDE_DIR - the blaze include directory
#
# Copyright (C) 2008 Gael Guennebaud
# Adapted from FindEigen.cmake:
# Copyright (c) 2006, 2007 Montel Laurent,
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
# NOTE(review): these modules were collapsed onto single physical lines by the
# extraction; commands below are re-lined without token changes.
if (BLAZE_INCLUDE_DIR)
  # in cache already
  set(BLAZE_FOUND TRUE)
else ()
  find_path(BLAZE_INCLUDE_DIR NAMES blaze/Blaze.h
    PATHS
    ${INCLUDE_INSTALL_DIR}
  )
  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(BLAZE DEFAULT_MSG BLAZE_INCLUDE_DIR)
  mark_as_advanced(BLAZE_INCLUDE_DIR)
endif()

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindBlitz.cmake ================================================
# - Try to find blitz lib
# Once done this will define
#
#  BLITZ_FOUND - system has blitz lib
#  BLITZ_INCLUDES - the blitz include directory
#  BLITZ_LIBRARIES - The libraries needed to use blitz
# Copyright (c) 2006, Montel Laurent,
# Copyright (c) 2007, Allen Winter,
# Copyright (C) 2008 Gael Guennebaud
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.

# include(FindLibraryWithDebug)

if (BLITZ_INCLUDES AND BLITZ_LIBRARIES)
  set(Blitz_FIND_QUIETLY TRUE)
endif ()

find_path(BLITZ_INCLUDES
  NAMES
  blitz/array.h
  PATH_SUFFIXES blitz*
  PATHS
  $ENV{BLITZDIR}/include
  ${INCLUDE_INSTALL_DIR}
)

find_library(BLITZ_LIBRARIES
  blitz
  PATHS
  $ENV{BLITZDIR}/lib
  ${LIB_INSTALL_DIR}
)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Blitz DEFAULT_MSG
                                  BLITZ_INCLUDES BLITZ_LIBRARIES)

mark_as_advanced(BLITZ_INCLUDES BLITZ_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindCBLAS.cmake ================================================
# include(FindLibraryWithDebug)

if (CBLAS_INCLUDES AND CBLAS_LIBRARIES)
  set(CBLAS_FIND_QUIETLY TRUE)
endif ()

find_path(CBLAS_INCLUDES
  NAMES
  cblas.h
  PATHS
  $ENV{CBLASDIR}/include
  ${INCLUDE_INSTALL_DIR}
)

find_library(CBLAS_LIBRARIES
  cblas
  PATHS
  $ENV{CBLASDIR}/lib
  ${LIB_INSTALL_DIR}
)

find_file(CBLAS_LIBRARIES
  libcblas.so.3
  PATHS
  /usr/lib
  /usr/lib64
  $ENV{CBLASDIR}/lib
  ${LIB_INSTALL_DIR}
)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CBLAS DEFAULT_MSG
                                  CBLAS_INCLUDES CBLAS_LIBRARIES)

mark_as_advanced(CBLAS_INCLUDES CBLAS_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindGMM.cmake ================================================
if (GMM_INCLUDE_DIR)
  # in cache already
  set(GMM_FOUND TRUE)
else ()
  find_path(GMM_INCLUDE_DIR NAMES gmm/gmm.h
    PATHS
    ${INCLUDE_INSTALL_DIR}
    ${GMM_INCLUDE_PATH}
  )
  include(FindPackageHandleStandardArgs)
  FIND_PACKAGE_HANDLE_STANDARD_ARGS(GMM DEFAULT_MSG GMM_INCLUDE_DIR )
  mark_as_advanced(GMM_INCLUDE_DIR)
endif()

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindMKL.cmake ================================================
if (MKL_LIBRARIES)
  set(MKL_FIND_QUIETLY TRUE)
endif ()

if(CMAKE_MINOR_VERSION GREATER 4)

if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")

find_library(MKL_LIBRARIES
  mkl_core
  PATHS
  $ENV{MKLLIB}
  /opt/intel/mkl/*/lib/em64t
  /opt/intel/Compiler/*/*/mkl/lib/em64t
  ${LIB_INSTALL_DIR}
)

find_library(MKL_GUIDE
  guide
  PATHS
  $ENV{MKLLIB}
  /opt/intel/mkl/*/lib/em64t
  /opt/intel/Compiler/*/*/mkl/lib/em64t
  /opt/intel/Compiler/*/*/lib/intel64
  ${LIB_INSTALL_DIR}
)

if(MKL_LIBRARIES AND MKL_GUIDE)
  set(MKL_LIBRARIES ${MKL_LIBRARIES} mkl_intel_lp64 mkl_sequential ${MKL_GUIDE} pthread)
endif()

else()

find_library(MKL_LIBRARIES
  mkl_core
  PATHS
  $ENV{MKLLIB}
  /opt/intel/mkl/*/lib/32
  /opt/intel/Compiler/*/*/mkl/lib/32
  ${LIB_INSTALL_DIR}
)

find_library(MKL_GUIDE
  guide
  PATHS
  $ENV{MKLLIB}
  /opt/intel/mkl/*/lib/32
  /opt/intel/Compiler/*/*/mkl/lib/32
  /opt/intel/Compiler/*/*/lib/intel32
  ${LIB_INSTALL_DIR}
)

if(MKL_LIBRARIES AND MKL_GUIDE)
  set(MKL_LIBRARIES ${MKL_LIBRARIES} mkl_intel mkl_sequential ${MKL_GUIDE} pthread)
endif()

endif()

endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(MKL DEFAULT_MSG MKL_LIBRARIES)

mark_as_advanced(MKL_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindMTL4.cmake ================================================
# - Try to find eigen2 headers
# Once done this will define
#
#  MTL4_FOUND - system has eigen2 lib
#  MTL4_INCLUDE_DIR - the eigen2 include directory
#
# Copyright (C) 2008 Gael Guennebaud
# Adapted from FindEigen.cmake:
# Copyright (c) 2006, 2007 Montel Laurent,
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.

if (MTL4_INCLUDE_DIR)
  # in cache already
  set(MTL4_FOUND TRUE)
else ()
  find_path(MTL4_INCLUDE_DIR NAMES boost/numeric/mtl/mtl.hpp
    PATHS
    ${INCLUDE_INSTALL_DIR}
  )
  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(MTL4 DEFAULT_MSG MTL4_INCLUDE_DIR)
  mark_as_advanced(MTL4_INCLUDE_DIR)
endif()

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindOPENBLAS.cmake ================================================
if (OPENBLAS_LIBRARIES)
  set(OPENBLAS_FIND_QUIETLY TRUE)
endif ()

find_file(OPENBLAS_LIBRARIES NAMES libopenblas.so libopenblas.so.0 PATHS /usr/lib /usr/lib64 $ENV{OPENBLASDIR} ${LIB_INSTALL_DIR})
find_library(OPENBLAS_LIBRARIES openblas PATHS $ENV{OPENBLASDIR} ${LIB_INSTALL_DIR})

if(OPENBLAS_LIBRARIES AND CMAKE_COMPILER_IS_GNUCXX)
  set(OPENBLAS_LIBRARIES ${OPENBLAS_LIBRARIES} "-lpthread -lgfortran")
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OPENBLAS DEFAULT_MSG OPENBLAS_LIBRARIES)

mark_as_advanced(OPENBLAS_LIBRARIES)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindPackageHandleStandardArgs.cmake ================================================
# FIND_PACKAGE_HANDLE_STANDARD_ARGS(NAME (DEFAULT_MSG|"Custom failure message") VAR1 ...
# )
#
#    This macro is intended to be used in FindXXX.cmake modules files.
#    It handles the REQUIRED and QUIET argument to find_package() and
#    it also sets the <UPPERCASED_NAME>_FOUND variable.
#    The package is found if all variables listed are TRUE.
#    Example:
#
#    FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibXml2 DEFAULT_MSG LIBXML2_LIBRARIES LIBXML2_INCLUDE_DIR)
#
#    LibXml2 is considered to be found, if both LIBXML2_LIBRARIES and
#    LIBXML2_INCLUDE_DIR are valid. Then also LIBXML2_FOUND is set to TRUE.
#    If it is not found and REQUIRED was used, it fails with FATAL_ERROR,
#    independent whether QUIET was used or not.
#
#    If it is found, the location is reported using the VAR1 argument, so
#    here a message "Found LibXml2: /usr/lib/libxml2.so" will be printed out.
#    If the second argument is DEFAULT_MSG, the message in the failure case will
#    be "Could NOT find LibXml2", if you don't like this message you can specify
#    your own custom failure message there.

macro(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FAIL_MSG _VAR1 )

  # resolve the failure message: either the canned default or a custom one
  if("${_FAIL_MSG}" STREQUAL "DEFAULT_MSG")
    if (${_NAME}_FIND_REQUIRED)
      set(_FAIL_MESSAGE "Could not find REQUIRED package ${_NAME}")
    else (${_NAME}_FIND_REQUIRED)
      set(_FAIL_MESSAGE "Could not find OPTIONAL package ${_NAME}")
    endif (${_NAME}_FIND_REQUIRED)
  else("${_FAIL_MSG}" STREQUAL "DEFAULT_MSG")
    set(_FAIL_MESSAGE "${_FAIL_MSG}")
  endif("${_FAIL_MSG}" STREQUAL "DEFAULT_MSG")

  string(TOUPPER ${_NAME} _NAME_UPPER)

  # the package is found iff every listed variable evaluates to TRUE
  set(${_NAME_UPPER}_FOUND TRUE)
  if(NOT ${_VAR1})
    set(${_NAME_UPPER}_FOUND FALSE)
  endif(NOT ${_VAR1})

  foreach(_CURRENT_VAR ${ARGN})
    if(NOT ${_CURRENT_VAR})
      set(${_NAME_UPPER}_FOUND FALSE)
    endif(NOT ${_CURRENT_VAR})
  endforeach(_CURRENT_VAR)

  if (${_NAME_UPPER}_FOUND)
    if (NOT ${_NAME}_FIND_QUIETLY)
      message(STATUS "Found ${_NAME}: ${${_VAR1}}")
    endif (NOT ${_NAME}_FIND_QUIETLY)
  else (${_NAME_UPPER}_FOUND)
    if (${_NAME}_FIND_REQUIRED)
      message(FATAL_ERROR "${_FAIL_MESSAGE}")
    else (${_NAME}_FIND_REQUIRED)
      if (NOT ${_NAME}_FIND_QUIETLY)
        message(STATUS "${_FAIL_MESSAGE}")
      endif (NOT ${_NAME}_FIND_QUIETLY)
    endif (${_NAME}_FIND_REQUIRED)
  endif (${_NAME_UPPER}_FOUND)

endmacro(FIND_PACKAGE_HANDLE_STANDARD_ARGS)

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/FindTvmet.cmake ================================================
# - Try to find tvmet headers
# Once done this will define
#
#  TVMET_FOUND - system has tvmet lib
#  TVMET_INCLUDE_DIR - the tvmet include directory
#
# Copyright (C) 2008 Gael Guennebaud
# Adapted from FindEigen.cmake:
# Copyright (c) 2006, 2007 Montel Laurent,
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.

if (TVMET_INCLUDE_DIR)
  # in cache already
  set(TVMET_FOUND TRUE)
else ()
  find_path(TVMET_INCLUDE_DIR NAMES tvmet/tvmet.h
    PATHS
    ${TVMETDIR}/
    ${INCLUDE_INSTALL_DIR}
  )
  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(Tvmet DEFAULT_MSG TVMET_INCLUDE_DIR)
  mark_as_advanced(TVMET_INCLUDE_DIR)
endif()

# ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/cmake/MacroOptionalAddSubdirectory.cmake ================================================
# - MACRO_OPTIONAL_ADD_SUBDIRECTORY() combines add_subdirectory() with an option()
# MACRO_OPTIONAL_ADD_SUBDIRECTORY( <dir> )
# If you use MACRO_OPTIONAL_ADD_SUBDIRECTORY() instead of add_subdirectory(),
# this will have two effects
# 1 - CMake will not complain if the directory doesn't exist
#     This makes sense if you want to distribute just one of the subdirs
#     in a source package, e.g. just one of the subdirs in kdeextragear.
# 2 - If the directory exists, it will offer an option to skip the
#     subdirectory.
#     This is useful if you want to compile only a subset of all
#     directories.
# Copyright (c) 2007, Alexander Neundorf,
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file. macro (MACRO_OPTIONAL_ADD_SUBDIRECTORY _dir ) get_filename_component(_fullPath ${_dir} ABSOLUTE) if(EXISTS ${_fullPath}) if(${ARGC} EQUAL 2) option(BUILD_${_dir} "Build directory ${_dir}" ${ARGV1}) else(${ARGC} EQUAL 2) option(BUILD_${_dir} "Build directory ${_dir}" TRUE) endif(${ARGC} EQUAL 2) if(BUILD_${_dir}) add_subdirectory(${_dir}) endif(BUILD_${_dir}) endif(EXISTS ${_fullPath}) endmacro (MACRO_OPTIONAL_ADD_SUBDIRECTORY) ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/bench.hh ================================================ //===================================================== // File : bench.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:16 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef BENCH_HH #define BENCH_HH #include "btl.hh" #include "bench_parameter.hh" #include #include "utilities.h" #include "size_lin_log.hh" #include "xy_file.hh" #include #include #include "timers/portable_perf_analyzer.hh" // #include "timers/mixed_perf_analyzer.hh" // #include "timers/x86_perf_analyzer.hh" // #include "timers/STL_perf_analyzer.hh" #ifdef HAVE_MKL extern "C" void cblas_saxpy(const int, const float, const float*, const int, float *, const int); #endif using namespace std; template class Perf_Analyzer, class Action> BTL_DONT_INLINE void bench( int size_min, int size_max, int nb_point ) { if (BtlConfig::skipAction(Action::name())) return; string filename="bench_"+Action::name()+".dat"; INFOS("starting " < tab_mflops(nb_point); std::vector tab_sizes(nb_point); // matrices and vector size calculations size_lin_log(nb_point,size_min,size_max,tab_sizes); std::vector oldSizes; std::vector oldFlops; bool hasOldResults = read_xy_file(filename, oldSizes, oldFlops, true); int oldi = oldSizes.size() - 1; // loop on matrix size Perf_Analyzer perf_action; for (int i=nb_point-1;i>=0;i--) { //INFOS("size=" <=0 && oldSizes[oldi]>tab_sizes[i]) --oldi; if (oldi>=0 && oldSizes[oldi]==tab_sizes[i]) { if (oldFlops[oldi] "; else std::cout << "\t < "; std::cout << oldFlops[oldi]; } --oldi; } std::cout << " MFlops (" << nb_point-i << "/" << nb_point << ")" << std::endl; } if (!BtlConfig::Instance.overwriteResults) { if (hasOldResults) { // merge the two data std::vector newSizes; std::vector newFlops; unsigned int i=0; unsigned int j=0; while (i BTL_DONT_INLINE void bench( int size_min, int size_max, int nb_point ){ // if the rdtsc is not available : bench(size_min,size_max,nb_point); // if the rdtsc is available : // bench(size_min,size_max,nb_point); // Only for small problem size. 
Otherwise it will be too long // bench(size_min,size_max,nb_point); // bench(size_min,size_max,nb_point); } #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/bench_parameter.hh ================================================ //===================================================== // File : bench_parameter.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:16 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef BENCH_PARAMETER_HH #define BENCH_PARAMETER_HH // minimal time for each measurement #define REAL_TYPE float // minimal time for each measurement #define MIN_TIME 0.2 // nb of point on bench curves #define NB_POINT 100 // min vector size for axpy bench #define MIN_AXPY 5 // max vector size for axpy bench #define MAX_AXPY 3000000 // min matrix size for matrix vector product bench #define MIN_MV 5 // max matrix size for matrix vector product bench #define MAX_MV 5000 // min matrix size for matrix matrix product bench #define MIN_MM 5 // max matrix size for matrix matrix product bench #define MAX_MM MAX_MV // min matrix size for LU bench #define MIN_LU 5 // max matrix size for LU bench #define MAX_LU 3000 // max size for tiny vector and matrix #define TINY_MV_MAX_SIZE 16 // default nb_sample for x86 timer #define DEFAULT_NB_SAMPLE 1000 // how many times we run a single bench (keep the best perf) #define DEFAULT_NB_TRIES 3 #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/btl.hh ================================================ //===================================================== // File : btl.hh // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef BTL_HH #define BTL_HH #include "bench_parameter.hh" #include #include #include #include #include "utilities.h" #if (defined __GNUC__) #define BTL_ALWAYS_INLINE __attribute__((always_inline)) inline #else #define BTL_ALWAYS_INLINE inline #endif #if (defined __GNUC__) #define BTL_DONT_INLINE __attribute__((noinline)) #else #define BTL_DONT_INLINE #endif #if (defined __GNUC__) #define BTL_ASM_COMMENT(X) asm("#" X) #else #define BTL_ASM_COMMENT(X) #endif #ifdef __SSE__ #include "xmmintrin.h" // This enables flush to zero (FTZ) and denormals are zero (DAZ) modes: #define BTL_DISABLE_SSE_EXCEPTIONS() { _mm_setcsr(_mm_getcsr() | 0x8040); } #else #define BTL_DISABLE_SSE_EXCEPTIONS() #endif /** Enhanced std::string */ class BtlString : public std::string { public: BtlString() : std::string() {} BtlString(const BtlString& str) : std::string(static_cast(str)) {} BtlString(const std::string& str) : std::string(str) {} BtlString(const char* str) : std::string(str) {} operator const char* () const { return c_str(); } void trim( bool left = true, bool right = true ) { int lspaces, rspaces, len = length(), i; lspaces = rspaces = 0; if ( left ) for (i=0; i=0 && (at(i)==' '||at(i)=='\t'||at(i)=='\r'||at(i)=='\n'); rspaces++,i--); *this = substr(lspaces, len-lspaces-rspaces); } std::vector split( const BtlString& delims = "\t\n ") const { std::vector ret; unsigned int numSplits = 0; size_t start, pos; start = 0; do { pos = find_first_of(delims, start); if (pos == start) { ret.push_back(""); start = pos + 1; } else if (pos == npos) ret.push_back( substr(start) ); else { ret.push_back( substr(start, pos - start) ); start = pos + 1; } //start = find_first_not_of(delims, start); ++numSplits; } while (pos != npos); return ret; } bool endsWith(const BtlString& str) const { 
if(str.size()>this->size()) return false; return this->substr(this->size()-str.size(),str.size()) == str; } bool contains(const BtlString& str) const { return this->find(str)size(); } bool beginsWith(const BtlString& str) const { if(str.size()>this->size()) return false; return this->substr(0,str.size()) == str; } BtlString toLowerCase( void ) { std::transform(begin(), end(), begin(), static_cast(::tolower) ); return *this; } BtlString toUpperCase( void ) { std::transform(begin(), end(), begin(), static_cast(::toupper) ); return *this; } /** Case insensitive comparison. */ bool isEquiv(const BtlString& str) const { BtlString str0 = *this; str0.toLowerCase(); BtlString str1 = str; str1.toLowerCase(); return str0 == str1; } /** Decompose the current string as a path and a file. For instance: "dir1/dir2/file.ext" leads to path="dir1/dir2/" and filename="file.ext" */ void decomposePathAndFile(BtlString& path, BtlString& filename) const { std::vector elements = this->split("/\\"); path = ""; filename = elements.back(); elements.pop_back(); if (this->at(0)=='/') path = "/"; for (unsigned int i=0 ; i config = BtlString(_config).split(" \t\n"); for (unsigned int i = 0; i m_selectedActionNames; }; #define BTL_MAIN \ BtlConfig BtlConfig::Instance #endif // BTL_HH ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/init/init_function.hh ================================================ //===================================================== // File : init_function.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:18 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef INIT_FUNCTION_HH #define INIT_FUNCTION_HH double simple_function(int index) { return index; } double simple_function(int index_i, int index_j) { return index_i+index_j; } double pseudo_random(int /*index*/) { return std::rand()/double(RAND_MAX); } double pseudo_random(int /*index_i*/, int /*index_j*/) { return std::rand()/double(RAND_MAX); } double null_function(int /*index*/) { return 0.0; } double null_function(int /*index_i*/, int /*index_j*/) { return 0.0; } #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/init/init_matrix.hh ================================================ //===================================================== // File : init_matrix.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:19 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef INIT_MATRIX_HH #define INIT_MATRIX_HH // The Vector class must satisfy the following part of STL vector concept : // resize() method // [] operator for setting element // value_type defined template BTL_DONT_INLINE void init_row(Vector & X, int size, int row){ X.resize(size); for (unsigned int j=0;j BTL_DONT_INLINE void init_matrix(Vector & A, int size){ A.resize(size); for (unsigned int row=0; row(A[row],size,row); } } template BTL_DONT_INLINE void init_matrix_symm(Matrix& A, int size){ A.resize(size); for (unsigned int row=0; row // Copyright (C) EDF R&D, lun sep 30 14:23:18 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef INIT_VECTOR_HH #define INIT_VECTOR_HH // The Vector class must satisfy the following part of STL vector concept : // resize() method // [] operator for setting element // value_type defined template void init_vector(Vector & X, int size){ X.resize(size); for (unsigned int i=0;i // Copyright (C) EDF R&D, lun sep 30 14:23:16 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef BENCH_STATIC_HH #define BENCH_STATIC_HH #include "btl.hh" #include "bench_parameter.hh" #include #include "utilities.h" #include "xy_file.hh" #include "static/static_size_generator.hh" #include "timers/portable_perf_analyzer.hh" // #include "timers/mixed_perf_analyzer.hh" // #include "timers/x86_perf_analyzer.hh" using namespace std; template class Perf_Analyzer, template class Action, template class Interface> BTL_DONT_INLINE void bench_static(void) { if (BtlConfig::skipAction(Action >::name())) return; string filename = "bench_" + Action >::name() + ".dat"; INFOS("starting " << filename); const int max_size = TINY_MV_MAX_SIZE; std::vector tab_mflops; std::vector tab_sizes; static_size_generator::go(tab_sizes,tab_mflops); dump_xy_file(tab_sizes,tab_mflops,filename); } // default Perf Analyzer template class Action, template class Interface> BTL_DONT_INLINE void bench_static(void) { bench_static(); //bench_static(); //bench_static(); } #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/static/intel_bench_fixed_size.hh ================================================ //===================================================== // File : intel_bench_fixed_size.hh // Author : L. Plagne // Copyright (C) EDF R&D, mar dc 3 18:59:37 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _BENCH_FIXED_SIZE_HH_ #define _BENCH_FIXED_SIZE_HH_ #include "utilities.h" #include "function_time.hh" template double bench_fixed_size(int size, unsigned long long & nb_calc,unsigned long long & nb_init) { Action action(size); double time_baseline=time_init(nb_init,action); while (time_baseline < MIN_TIME) { //INFOS("nb_init="< > > perf_action; tab_mflops.push_back(perf_action.eval_mflops(SIZE)); std::cout << tab_mflops.back() << " MFlops" << std::endl; static_size_generator::go(tab_sizes,tab_mflops); }; }; //recursion end template class Perf_Analyzer, template class Action, template class Interface> struct static_size_generator<1,Perf_Analyzer,Action,Interface>{ static void go(vector & tab_sizes, vector & tab_mflops) { tab_sizes.push_back(1); Perf_Analyzer > > perf_action; tab_mflops.push_back(perf_action.eval_mflops(1)); }; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/timers/STL_perf_analyzer.hh ================================================ //===================================================== // File : STL_perf_analyzer.hh // Author : L. Plagne // Copyright (C) EDF R&D, mar dc 3 18:59:35 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _STL_PERF_ANALYSER_HH #define _STL_PERF_ANALYSER_HH #include "STL_timer.hh" #include "bench_parameter.hh" template class STL_Perf_Analyzer{ public: STL_Perf_Analyzer(unsigned long long nb_sample=DEFAULT_NB_SAMPLE):_nb_sample(nb_sample),_chronos() { MESSAGE("STL_Perf_Analyzer Ctor"); }; STL_Perf_Analyzer( const STL_Perf_Analyzer & ){ INFOS("Copy Ctor not implemented"); exit(0); }; ~STL_Perf_Analyzer( void ){ MESSAGE("STL_Perf_Analyzer Dtor"); }; inline double eval_mflops(int size) { ACTION action(size); _chronos.start_baseline(_nb_sample); do { action.initialize(); } while (_chronos.check()); double baseline_time=_chronos.get_time(); _chronos.start(_nb_sample); do { action.initialize(); action.calculate(); } while (_chronos.check()); double calculate_time=_chronos.get_time(); double corrected_time=calculate_time-baseline_time; // cout << size <<" "< // Copyright (C) EDF R&D, mar dc 3 18:59:35 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // // STL Timer Class. Adapted (L.P.) 
// STL Timer Class. Adapted (L.P.) from the timer class by Musser et Al
// described in the Book : STL Tutorial and reference guide.
// Define a timer class for analyzing algorithm performance.
// The stripped #include names and the vector element type are restored
// here; the class logic is unchanged.
#include <time.h>
#include <vector>
#include <algorithm>

using namespace std;

class STL_Timer {
public:
  STL_Timer(){ baseline = false; };  // Default constructor
  // Start a series of r trials:
  void start(unsigned int r){
    reps = r;
    count = 0;
    iterations.clear();
    iterations.reserve(reps);
    initial = time(0);
  };
  // Start a series of r trials to determine baseline time:
  void start_baseline(unsigned int r)
  {
    baseline = true;
    start(r);
  }
  // Returns true while trials remain, false once `reps` one-second
  // trials have completed.  Each trial counts how many loop iterations
  // fit into one wall-clock second.
  bool check()
  {
    ++count;
    final = time(0);
    if (initial < final) {
      // A second boundary was crossed: record the iteration count of
      // this trial and start the next one.
      iterations.push_back(count);
      initial = final;
      count = 0;
    }
    return (iterations.size() < reps);
  };
  // Returns the results for external use: the reciprocal of the median
  // iterations-per-second, i.e. the seconds-per-iteration estimate.
  double get_time( void )
  {
    sort(iterations.begin(), iterations.end());
    return 1.0/iterations[reps/2];
  };
private:
  unsigned int reps;  // Number of trials
  // For storing loop iterations of a trial
  vector<long> iterations;
  // For saving initial and final times of a trial
  time_t initial, final;
  // For counting loop iterations of a trial
  unsigned long count;
  // true if this is a baseline computation, false otherwise
  bool baseline;
  // For recording the baseline time
  // NOTE(review): never assigned anywhere in this class — looks vestigial.
  double baseline_time;
};
Plagne // Copyright (C) EDF R&D, mar dc 3 18:59:36 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _MIXED_PERF_ANALYSER_HH #define _MIXED_PERF_ANALYSER_HH #include "x86_perf_analyzer.hh" #include "portable_perf_analyzer.hh" // choose portable perf analyzer for long calculations and x86 analyser for short ones template class Mixed_Perf_Analyzer{ public: Mixed_Perf_Analyzer( void ):_x86pa(),_ppa(),_use_ppa(true) { MESSAGE("Mixed_Perf_Analyzer Ctor"); }; Mixed_Perf_Analyzer( const Mixed_Perf_Analyzer & ){ INFOS("Copy Ctor not implemented"); exit(0); }; ~Mixed_Perf_Analyzer( void ){ MESSAGE("Mixed_Perf_Analyzer Dtor"); }; inline double eval_mflops(int size) { double result=0.0; if (_use_ppa){ result=_ppa.eval_mflops(size); if (_ppa.get_nb_calc()>DEFAULT_NB_SAMPLE){_use_ppa=false;} } else{ result=_x86pa.eval_mflops(size); } return result; } private: Portable_Perf_Analyzer _ppa; X86_Perf_Analyzer _x86pa; bool _use_ppa; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/timers/portable_perf_analyzer.hh ================================================ //===================================================== // File : portable_perf_analyzer.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, mar d�c 3 18:59:35 CET 2002 // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _PORTABLE_PERF_ANALYZER_HH #define _PORTABLE_PERF_ANALYZER_HH #include "utilities.h" #include "timers/portable_timer.hh" template class Portable_Perf_Analyzer{ public: Portable_Perf_Analyzer( ):_nb_calc(0), m_time_action(0), _chronos(){ MESSAGE("Portable_Perf_Analyzer Ctor"); }; Portable_Perf_Analyzer( const Portable_Perf_Analyzer & ){ INFOS("Copy Ctor not implemented"); exit(0); }; ~Portable_Perf_Analyzer(){ MESSAGE("Portable_Perf_Analyzer Dtor"); }; BTL_DONT_INLINE double eval_mflops(int size) { Action action(size); // action.initialize(); // time_action = time_calculate(action); while (m_time_action < MIN_TIME) { if(_nb_calc==0) _nb_calc = 1; else _nb_calc *= 2; action.initialize(); m_time_action = time_calculate(action); } // optimize for (int i=1; i // Copyright (C) EDF R&D, mar d�c 3 18:59:35 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later 
version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _PORTABLE_PERF_ANALYZER_HH #define _PORTABLE_PERF_ANALYZER_HH #include "utilities.h" #include "timers/portable_timer.hh" template class Portable_Perf_Analyzer{ public: Portable_Perf_Analyzer( void ):_nb_calc(1),_nb_init(1),_chronos(){ MESSAGE("Portable_Perf_Analyzer Ctor"); }; Portable_Perf_Analyzer( const Portable_Perf_Analyzer & ){ INFOS("Copy Ctor not implemented"); exit(0); }; ~Portable_Perf_Analyzer( void ){ MESSAGE("Portable_Perf_Analyzer Dtor"); }; inline double eval_mflops(int size) { Action action(size); // double time_baseline = time_init(action); // while (time_baseline < MIN_TIME_INIT) // { // _nb_init *= 2; // time_baseline = time_init(action); // } // // // optimize // for (int i=1; i #include class Portable_Timer { public: Portable_Timer() { } void start() { m_start_time = double(mach_absolute_time())*1e-9;; } void stop() { m_stop_time = double(mach_absolute_time())*1e-9;; } double elapsed() { return user_time(); } double user_time() { return m_stop_time - m_start_time; } private: double m_stop_time, m_start_time; }; // Portable_Timer (Apple) #else #include #include #include #include class Portable_Timer { public: Portable_Timer() { m_clkid = BtlConfig::Instance.realclock ? 
CLOCK_REALTIME : CLOCK_PROCESS_CPUTIME_ID; } Portable_Timer(int clkid) : m_clkid(clkid) {} void start() { timespec ts; clock_gettime(m_clkid, &ts); m_start_time = double(ts.tv_sec) + 1e-9 * double(ts.tv_nsec); } void stop() { timespec ts; clock_gettime(m_clkid, &ts); m_stop_time = double(ts.tv_sec) + 1e-9 * double(ts.tv_nsec); } double elapsed() { return user_time(); } double user_time() { return m_stop_time - m_start_time; } private: int m_clkid; double m_stop_time, m_start_time; }; // Portable_Timer (Linux) #endif #endif // PORTABLE_TIMER_HPP ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/timers/x86_perf_analyzer.hh ================================================ //===================================================== // File : x86_perf_analyzer.hh // Author : L. Plagne // Copyright (C) EDF R&D, mar d�c 3 18:59:35 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef _X86_PERF_ANALYSER_HH #define _X86_PERF_ANALYSER_HH #include "x86_timer.hh" #include "bench_parameter.hh" template class X86_Perf_Analyzer{ public: X86_Perf_Analyzer( unsigned long long nb_sample=DEFAULT_NB_SAMPLE):_nb_sample(nb_sample),_chronos() { MESSAGE("X86_Perf_Analyzer Ctor"); _chronos.find_frequency(); }; X86_Perf_Analyzer( const X86_Perf_Analyzer & ){ INFOS("Copy Ctor not implemented"); exit(0); }; ~X86_Perf_Analyzer( void ){ MESSAGE("X86_Perf_Analyzer Dtor"); }; inline double eval_mflops(int size) { ACTION action(size); int nb_loop=5; double calculate_time=0.0; double baseline_time=0.0; for (int j=0 ; j < nb_loop ; j++){ _chronos.clear(); for(int i=0 ; i < _nb_sample ; i++) { _chronos.start(); action.initialize(); action.calculate(); _chronos.stop(); _chronos.add_get_click(); } calculate_time += double(_chronos.get_shortest_clicks())/_chronos.frequency(); if (j==0) action.check_result(); _chronos.clear(); for(int i=0 ; i < _nb_sample ; i++) { _chronos.start(); action.initialize(); _chronos.stop(); _chronos.add_get_click(); } baseline_time+=double(_chronos.get_shortest_clicks())/_chronos.frequency(); } double corrected_time = (calculate_time-baseline_time)/double(nb_loop); // INFOS("_nb_sample="<<_nb_sample); // INFOS("baseline_time="< // Copyright (C) EDF R&D, mar d�c 3 18:59:35 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef _X86_TIMER_HH #define _X86_TIMER_HH #include #include #include #include //#include "system_time.h" #define u32 unsigned int #include #include "utilities.h" #include #include #include #include // frequence de la becanne en Hz //#define FREQUENCY 648000000 //#define FREQUENCY 1400000000 #define FREQUENCY 1695000000 using namespace std; class X86_Timer { public : X86_Timer( void ):_frequency(FREQUENCY),_nb_sample(0) { MESSAGE("X86_Timer Default Ctor"); } inline void start( void ){ rdtsc(_click_start.n32[0],_click_start.n32[1]); } inline void stop( void ){ rdtsc(_click_stop.n32[0],_click_stop.n32[1]); } inline double frequency( void ){ return _frequency; } double get_elapsed_time_in_second( void ){ return (_click_stop.n64-_click_start.n64)/double(FREQUENCY); } unsigned long long get_click( void ){ return (_click_stop.n64-_click_start.n64); } inline void find_frequency( void ){ time_t initial, final; int dummy=2; initial = time(0); start(); do { dummy+=2; } while(time(0)==initial); // On est au debut d'un cycle d'une seconde !!! 
initial = time(0); start(); do { dummy+=2; } while(time(0)==initial); final=time(0); stop(); // INFOS("fine grained time : "<< get_elapsed_time_in_second()); // INFOS("coarse grained time : "<< final-initial); _frequency=_frequency*get_elapsed_time_in_second()/double(final-initial); /// INFOS("CPU frequency : "<< _frequency); } void add_get_click( void ){ _nb_sample++; _counted_clicks[get_click()]++; fill_history_clicks(); } void dump_statistics(string filemane){ ofstream outfile (filemane.c_str(),ios::out) ; std::map::iterator itr; for(itr=_counted_clicks.begin() ; itr!=_counted_clicks.end() ; itr++) { outfile << (*itr).first << " " << (*itr).second << endl ; } outfile.close(); } void dump_history(string filemane){ ofstream outfile (filemane.c_str(),ios::out) ; for(int i=0 ; i<_history_mean_clicks.size() ; i++) { outfile << i << " " << _history_mean_clicks[i] << " " << _history_shortest_clicks[i] << " " << _history_most_occured_clicks[i] << endl ; } outfile.close(); } double get_mean_clicks( void ){ std::map::iterator itr; unsigned long long mean_clicks=0; for(itr=_counted_clicks.begin() ; itr!=_counted_clicks.end() ; itr++) { mean_clicks+=(*itr).second*(*itr).first; } return mean_clicks/double(_nb_sample); } double get_shortest_clicks( void ){ return double((*_counted_clicks.begin()).first); } void fill_history_clicks( void ){ _history_mean_clicks.push_back(get_mean_clicks()); _history_shortest_clicks.push_back(get_shortest_clicks()); _history_most_occured_clicks.push_back(get_most_occured_clicks()); } double get_most_occured_clicks( void ){ unsigned long long moc=0; unsigned long long max_occurence=0; std::map::iterator itr; for(itr=_counted_clicks.begin() ; itr!=_counted_clicks.end() ; itr++) { if (max_occurence<=(*itr).second){ max_occurence=(*itr).second; moc=(*itr).first; } } return double(moc); } void clear( void ) { _counted_clicks.clear(); _history_mean_clicks.clear(); _history_shortest_clicks.clear(); _history_most_occured_clicks.clear(); _nb_sample=0; } 
private : union { unsigned long int n32[2] ; unsigned long long n64 ; } _click_start; union { unsigned long int n32[2] ; unsigned long long n64 ; } _click_stop; double _frequency ; map _counted_clicks; vector _history_mean_clicks; vector _history_shortest_clicks; vector _history_most_occured_clicks; unsigned long long _nb_sample; }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/generic_bench/utils/size_lin_log.hh ================================================ //===================================================== // File : size_lin_log.hh // Author : L. Plagne // Copyright (C) EDF R&D, mar dc 3 18:59:37 CET 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// Fill X with nb_point sizes distributed logarithmically (an approximately
// geometric progression) between size_min and size_max.
// The Vector class must satisfy the following part of STL vector concept :
//  resize() method
//  [] operator for setting element
// The loop body, truncated by extraction, is restored here.
template<class Vector>
void size_log(const int nb_point, const int size_min, const int size_max, Vector & X)
{
  X.resize(nb_point);

  float ls_min = std::log(float(size_min));
  float ls_max = std::log(float(size_max));

  float ls = 0.0;

  float delta_ls = (ls_max - ls_min)/(float(nb_point - 1));

  int size = 0;

  for (int i = 0; i < nb_point; i++){
    ls = ls_min + float(i)*delta_ls;
    // int() truncates toward zero, matching the original BTL behavior.
    size = int(std::exp(ls));
    X[i] = size;
  }
}
// The Vector class must satisfy the following part of STL vector concept :
//  resize() method
//  [] operator for setting element
//  the vector element must have the << operator define
// Writes one "X[i] Y[i]" pair per line to `filename`, overwriting any
// existing file.  The loop body, truncated by extraction, is restored.
template<class Vector_A, class Vector_B>
void dump_xy_file(const Vector_A & X, const Vector_B & Y, const std::string & filename){

  std::ofstream outfile(filename.c_str(), std::ios::out);
  int size = X.size();

  for (int i = 0; i < size; i++)
    outfile << X[i] << " " << Y[i] << std::endl;

  outfile.close();
}
double *, int *); int BLASFUNC(qcopy) (int *, double *, int *, double *, int *); int BLASFUNC(ccopy) (int *, float *, int *, float *, int *); int BLASFUNC(zcopy) (int *, double *, int *, double *, int *); int BLASFUNC(xcopy) (int *, double *, int *, double *, int *); int BLASFUNC(sswap) (int *, float *, int *, float *, int *); int BLASFUNC(dswap) (int *, double *, int *, double *, int *); int BLASFUNC(qswap) (int *, double *, int *, double *, int *); int BLASFUNC(cswap) (int *, float *, int *, float *, int *); int BLASFUNC(zswap) (int *, double *, int *, double *, int *); int BLASFUNC(xswap) (int *, double *, int *, double *, int *); float BLASFUNC(sasum) (int *, float *, int *); float BLASFUNC(scasum)(int *, float *, int *); double BLASFUNC(dasum) (int *, double *, int *); double BLASFUNC(qasum) (int *, double *, int *); double BLASFUNC(dzasum)(int *, double *, int *); double BLASFUNC(qxasum)(int *, double *, int *); int BLASFUNC(isamax)(int *, float *, int *); int BLASFUNC(idamax)(int *, double *, int *); int BLASFUNC(iqamax)(int *, double *, int *); int BLASFUNC(icamax)(int *, float *, int *); int BLASFUNC(izamax)(int *, double *, int *); int BLASFUNC(ixamax)(int *, double *, int *); int BLASFUNC(ismax) (int *, float *, int *); int BLASFUNC(idmax) (int *, double *, int *); int BLASFUNC(iqmax) (int *, double *, int *); int BLASFUNC(icmax) (int *, float *, int *); int BLASFUNC(izmax) (int *, double *, int *); int BLASFUNC(ixmax) (int *, double *, int *); int BLASFUNC(isamin)(int *, float *, int *); int BLASFUNC(idamin)(int *, double *, int *); int BLASFUNC(iqamin)(int *, double *, int *); int BLASFUNC(icamin)(int *, float *, int *); int BLASFUNC(izamin)(int *, double *, int *); int BLASFUNC(ixamin)(int *, double *, int *); int BLASFUNC(ismin)(int *, float *, int *); int BLASFUNC(idmin)(int *, double *, int *); int BLASFUNC(iqmin)(int *, double *, int *); int BLASFUNC(icmin)(int *, float *, int *); int BLASFUNC(izmin)(int *, double *, int *); int 
BLASFUNC(ixmin)(int *, double *, int *); float BLASFUNC(samax) (int *, float *, int *); double BLASFUNC(damax) (int *, double *, int *); double BLASFUNC(qamax) (int *, double *, int *); float BLASFUNC(scamax)(int *, float *, int *); double BLASFUNC(dzamax)(int *, double *, int *); double BLASFUNC(qxamax)(int *, double *, int *); float BLASFUNC(samin) (int *, float *, int *); double BLASFUNC(damin) (int *, double *, int *); double BLASFUNC(qamin) (int *, double *, int *); float BLASFUNC(scamin)(int *, float *, int *); double BLASFUNC(dzamin)(int *, double *, int *); double BLASFUNC(qxamin)(int *, double *, int *); float BLASFUNC(smax) (int *, float *, int *); double BLASFUNC(dmax) (int *, double *, int *); double BLASFUNC(qmax) (int *, double *, int *); float BLASFUNC(scmax) (int *, float *, int *); double BLASFUNC(dzmax) (int *, double *, int *); double BLASFUNC(qxmax) (int *, double *, int *); float BLASFUNC(smin) (int *, float *, int *); double BLASFUNC(dmin) (int *, double *, int *); double BLASFUNC(qmin) (int *, double *, int *); float BLASFUNC(scmin) (int *, float *, int *); double BLASFUNC(dzmin) (int *, double *, int *); double BLASFUNC(qxmin) (int *, double *, int *); int BLASFUNC(sscal) (int *, float *, float *, int *); int BLASFUNC(dscal) (int *, double *, double *, int *); int BLASFUNC(qscal) (int *, double *, double *, int *); int BLASFUNC(cscal) (int *, float *, float *, int *); int BLASFUNC(zscal) (int *, double *, double *, int *); int BLASFUNC(xscal) (int *, double *, double *, int *); int BLASFUNC(csscal)(int *, float *, float *, int *); int BLASFUNC(zdscal)(int *, double *, double *, int *); int BLASFUNC(xqscal)(int *, double *, double *, int *); float BLASFUNC(snrm2) (int *, float *, int *); float BLASFUNC(scnrm2)(int *, float *, int *); double BLASFUNC(dnrm2) (int *, double *, int *); double BLASFUNC(qnrm2) (int *, double *, int *); double BLASFUNC(dznrm2)(int *, double *, int *); double BLASFUNC(qxnrm2)(int *, double *, int *); int 
BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(srotg) (float *, float *, float *, float *); int BLASFUNC(drotg) (double *, double *, double *, double *); int BLASFUNC(qrotg) (double *, double *, double *, double *); int BLASFUNC(crotg) (float *, float *, float *, float *); int BLASFUNC(zrotg) (double *, double *, double *, double *); int BLASFUNC(xrotg) (double *, double *, double *, double *); int BLASFUNC(srotmg)(float *, float *, float *, float *, float *); int BLASFUNC(drotmg)(double *, double *, double *, double *, double *); int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *); int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *); int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *); /* Level 2 routines */ int BLASFUNC(sger)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(cgeru)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(cgerc)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(zgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int 
BLASFUNC(xgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(strsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(ctrsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(strmv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int 
*, double *, int *); int BLASFUNC(ctrmv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ssymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymv) 
(char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(csyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(csyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zsyr2) (char *, 
int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(dspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(qspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(sspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(dspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(qspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(xspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cher) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cher2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(chpr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zhpr2) (char *, int 
*, double *, double *, int *, double *, int *, double *); int BLASFUNC(xhpr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(chemv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chpmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(snorm)(char *, int *, int *, float *, int *); int BLASFUNC(dnorm)(char *, int *, int *, double *, int *); int BLASFUNC(cnorm)(char *, int *, int *, float *, int *); int BLASFUNC(znorm)(char *, int *, int *, double *, int *); int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int 
BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); /* Level 3 routines */ int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sge2mm)(char *, char *, 
char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *, double 
*, int *, double *, double *, int *); int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(csyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(csyr2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int 
BLASFUNC(zsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(chemm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cherk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cher2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, 
double *, double *, int *); int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(sgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *); int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *); int 
BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(spotf2)(char *, int *, float *, int *, int *); int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *); int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(spotrf)(char 
*, int *, float *, int *, int *); int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *); int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(slauu2)(char *, int *, float *, int *, int *); int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(clauu2)(char *, int *, float *, int *, int *); int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(slauum)(char *, int *, float *, int *, int *); int BLASFUNC(dlauum)(char *, int *, double *, int *, int *); int BLASFUNC(qlauum)(char *, int *, double *, int *, int *); int BLASFUNC(clauum)(char *, int *, float *, int *, int *); int BLASFUNC(zlauum)(char *, int *, double *, int *, int *); int BLASFUNC(xlauum)(char *, int *, double *, int *, int *); int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *); int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(spotri)(char *, int *, float *, int *, int *); int BLASFUNC(dpotri)(char *, int *, double *, int 
*, int *); int BLASFUNC(qpotri)(char *, int *, double *, int *, int *); int BLASFUNC(cpotri)(char *, int *, float *, int *, int *); int BLASFUNC(zpotri)(char *, int *, double *, int *, int *); int BLASFUNC(xpotri)(char *, int *, double *, int *, int *); #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/BLAS/blas_interface.hh ================================================ //===================================================== // File : blas_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef blas_PRODUIT_MATRICE_VECTEUR_HH #define blas_PRODUIT_MATRICE_VECTEUR_HH #include #include extern "C" { #include "blas.h" // Cholesky Factorization // void spotrf_(const char* uplo, const int* n, float *a, const int* ld, int* info); // void dpotrf_(const char* uplo, const int* n, double *a, const int* ld, int* info); void ssytrd_(char *uplo, const int *n, float *a, const int *lda, float *d, float *e, float *tau, float *work, int *lwork, int *info ); void dsytrd_(char *uplo, const int *n, double *a, const int *lda, double *d, double *e, double *tau, double *work, int *lwork, int *info ); void sgehrd_( const int *n, int *ilo, int *ihi, float *a, const int *lda, float *tau, float *work, int *lwork, int *info ); void dgehrd_( const int *n, int *ilo, int *ihi, double *a, const int *lda, double *tau, double *work, int *lwork, int *info ); // LU row pivoting // void dgetrf_( int *m, int *n, double *a, int *lda, int *ipiv, int *info ); // void sgetrf_(const int* m, const int* n, float *a, const int* ld, int* ipivot, int* info); // LU full pivoting void sgetc2_(const int* n, float *a, const int *lda, int *ipiv, int *jpiv, int*info ); void dgetc2_(const int* n, double *a, const int *lda, int *ipiv, int *jpiv, int*info ); #ifdef HAS_LAPACK #endif } #define MAKE_STRING2(S) #S #define MAKE_STRING(S) MAKE_STRING2(S) #define CAT2(A,B) A##B #define CAT(A,B) CAT2(A,B) template class blas_interface; static char notrans = 'N'; static char trans = 'T'; static char nonunit = 'N'; static char lower = 'L'; static char right = 'R'; static char left = 'L'; static int intone = 1; #define SCALAR float #define SCALAR_PREFIX s #include "blas_interface_impl.hh" #undef SCALAR #undef SCALAR_PREFIX #define SCALAR double #define SCALAR_PREFIX d #include "blas_interface_impl.hh" #undef SCALAR #undef SCALAR_PREFIX #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/BLAS/blas_interface_impl.hh 
================================================ #define BLAS_FUNC(NAME) CAT(CAT(SCALAR_PREFIX,NAME),_) template<> class blas_interface : public c_interface_base { public : static SCALAR fone; static SCALAR fzero; static inline std::string name() { return MAKE_STRING(CBLASNAME); } static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ BLAS_FUNC(gemv)(¬rans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); } static inline void symv(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ BLAS_FUNC(symv)(&lower, &N,&fone,A,&N,B,&intone,&fzero,X,&intone); } static inline void syr2(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ BLAS_FUNC(syr2)(&lower,&N,&fone,B,&intone,X,&intone,A,&N); } static inline void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){ BLAS_FUNC(ger)(&N,&N,&fone,X,&intone,Y,&intone,A,&N); } static inline void rot(gene_vector & A, gene_vector & B, SCALAR c, SCALAR s, int N){ BLAS_FUNC(rot)(&N,A,&intone,B,&intone,&c,&s); } static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ BLAS_FUNC(gemv)(&trans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); } static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ BLAS_FUNC(gemm)(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); } static inline void transposed_matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ BLAS_FUNC(gemm)(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); } static inline void ata_product(gene_matrix & A, gene_matrix & X, int N){ BLAS_FUNC(syrk)(&lower,&trans,&N,&N,&fone,A,&N,&fzero,X,&N); } static inline void aat_product(gene_matrix & A, gene_matrix & X, int N){ BLAS_FUNC(syrk)(&lower,¬rans,&N,&N,&fone,A,&N,&fzero,X,&N); } static inline void axpy(SCALAR coef, const gene_vector & X, gene_vector & Y, int N){ BLAS_FUNC(axpy)(&N,&coef,X,&intone,Y,&intone); } static inline void axpby(SCALAR a, const gene_vector & X, SCALAR 
b, gene_vector & Y, int N){ BLAS_FUNC(scal)(&N,&b,Y,&intone); BLAS_FUNC(axpy)(&N,&a,X,&intone,Y,&intone); } static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ int N2 = N*N; BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); char uplo = 'L'; int info = 0; BLAS_FUNC(potrf)(&uplo, &N, C, &N, &info); if(info!=0) std::cerr << "potrf_ error " << info << "\n"; } static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ int N2 = N*N; BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); int info = 0; int * ipiv = (int*)alloca(sizeof(int)*N); BLAS_FUNC(getrf)(&N, &N, C, &N, ipiv, &info); if(info!=0) std::cerr << "getrf_ error " << info << "\n"; } static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){ BLAS_FUNC(copy)(&N, B, &intone, X, &intone); BLAS_FUNC(trsv)(&lower, ¬rans, &nonunit, &N, L, &N, X, &intone); } static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix & X, int N){ BLAS_FUNC(copy)(&N, B, &intone, X, &intone); BLAS_FUNC(trsm)(&right, &lower, ¬rans, &nonunit, &N, &N, &fone, L, &N, X, &N); } static inline void trmm(gene_matrix & A, gene_matrix & B, gene_matrix & /*X*/, int N){ BLAS_FUNC(trmm)(&left, &lower, ¬rans,&nonunit, &N,&N,&fone,A,&N,B,&N); } #ifdef HAS_LAPACK static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ int N2 = N*N; BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); int info = 0; int * ipiv = (int*)alloca(sizeof(int)*N); int * jpiv = (int*)alloca(sizeof(int)*N); BLAS_FUNC(getc2)(&N, C, &N, ipiv, jpiv, &info); } static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){ { int N2 = N*N; int inc = 1; BLAS_FUNC(copy)(&N2, X, &inc, C, &inc); } int info = 0; int ilo = 1; int ihi = N; int bsize = 64; int worksize = N*bsize; SCALAR* d = new SCALAR[N+worksize]; BLAS_FUNC(gehrd)(&N, &ilo, &ihi, C, &N, d, d+N, &worksize, &info); delete[] d; } static inline void tridiagonalization(const 
gene_matrix & X, gene_matrix & C, int N){ { int N2 = N*N; int inc = 1; BLAS_FUNC(copy)(&N2, X, &inc, C, &inc); } char uplo = 'U'; int info = 0; int bsize = 64; int worksize = N*bsize; SCALAR* d = new SCALAR[3*N+worksize]; BLAS_FUNC(sytrd)(&uplo, &N, C, &N, d, d+N, d+2*N, d+3*N, &worksize, &info); delete[] d; } #endif // HAS_LAPACK }; SCALAR blas_interface::fone = SCALAR(1); SCALAR blas_interface::fzero = SCALAR(0); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/BLAS/c_interface_base.h ================================================ #ifndef BTL_C_INTERFACE_BASE_H #define BTL_C_INTERFACE_BASE_H #include "utilities.h" #include template class c_interface_base { public: typedef real real_type; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef real* gene_matrix; typedef real* gene_vector; static void free_matrix(gene_matrix & A, int /*N*/){ delete[] A; } static void free_vector(gene_vector & B){ delete[] B; } static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ int N = A_stl.size(); A = new real[N*N]; for (int j=0;j // Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "blas_interface.hh" #include "bench.hh" #include "basic_actions.hh" #include "action_cholesky.hh" #include "action_lu_decomp.hh" #include "action_partial_lu.hh" #include "action_trisolve_matrix.hh" #ifdef HAS_LAPACK #include "action_hessenberg.hh" #endif BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); #ifdef HAS_LAPACK // bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); #endif //bench > >(MIN_LU,MAX_LU,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/STL/STL_interface.hh ================================================ //===================================================== // File : STL_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:24 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef STL_INTERFACE_HH #define STL_INTERFACE_HH #include #include #include "utilities.h" using namespace std; template class STL_interface{ public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef stl_matrix gene_matrix; typedef stl_vector gene_vector; static inline std::string name( void ) { return "STL"; } static void free_matrix(gene_matrix & /*A*/, int /*N*/){} static void free_vector(gene_vector & /*B*/){} static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A = A_stl; } static inline void vector_from_stl(gene_vector & B, stl_vector & B_stl){ B = B_stl; } static inline void vector_to_stl(gene_vector & B, stl_vector & B_stl){ B_stl = B ; } static inline void matrix_to_stl(gene_matrix & A, stl_matrix & A_stl){ A_stl = A ; } static inline void copy_vector(const gene_vector & source, gene_vector & cible, int N){ for (int i=0;i=j) { for (int k=0;k > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blaze/blaze_interface.hh ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; 
either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef BLAZE_INTERFACE_HH #define BLAZE_INTERFACE_HH #include #include #include // using namespace blaze; #include template class blaze_interface { public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef blaze::DynamicMatrix gene_matrix; typedef blaze::DynamicVector gene_vector; static inline std::string name() { return "blaze"; } static void free_matrix(gene_matrix & A, int N){ return ; } static void free_vector(gene_vector & B){ return ; } static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl[0].size(), A_stl.size()); for (int j=0; j ipvt(N); // lu_factor(R, ipvt); // } // static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){ // X = lower_trisolve(L, B); // } static inline void copy_matrix(const gene_matrix & source, gene_matrix & cible, int N){ cible = source; } static inline void copy_vector(const gene_vector & source, gene_vector & cible, int N){ cible = source; } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blaze/main.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the 
GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "blaze_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blitz/blitz_LU_solve_interface.hh ================================================ //===================================================== // File : blitz_LU_solve_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:31 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef BLITZ_LU_SOLVE_INTERFACE_HH #define BLITZ_LU_SOLVE_INTERFACE_HH #include "blitz/array.h" #include BZ_USING_NAMESPACE(blitz) template class blitz_LU_solve_interface : public blitz_interface { public : typedef typename blitz_interface::gene_matrix gene_matrix; typedef typename blitz_interface::gene_vector gene_vector; typedef blitz::Array Pivot_Vector; inline static void new_Pivot_Vector(Pivot_Vector & pivot,int N) { pivot.resize(N); } inline static void free_Pivot_Vector(Pivot_Vector & pivot) { return; } static inline real matrix_vector_product_sliced(const gene_matrix & A, gene_vector B, int row, int col_start, int col_end) { real somme=0.; for (int j=col_start ; j=big ) big = abs( LU( i, j ) ) ; } if( big==0. ) { INFOS( "blitz_LU_factor::Singular matrix" ) ; exit( 0 ) ; } ImplicitScaling( i ) = 1./big ; } // Loop over columns of Crout's method : for( int j=0; j=big ) { dum = ImplicitScaling( i )*abs( theSum ) ; big = dum ; index_max = i ; } } // Interchanging rows and the scale factor : if( j!=index_max ) { for( int k=0; k=0; i-- ) { theSum = X( i ) ; // theSum = B( i ) ; theSum -= matrix_vector_product_sliced(LU, X, i, i+1, N) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*X( Range( i+1, toEnd ) ) ) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*B( Range( i+1, toEnd ) ) ) ; // Store a component of the solution vector : X( i ) = theSum/LU( i, i ) ; // B( i ) = theSum/LU( i, i ) ; } } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blitz/blitz_interface.hh ================================================ //===================================================== // File : blitz_interface.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef BLITZ_INTERFACE_HH #define BLITZ_INTERFACE_HH #include #include #include #include #include #include BZ_USING_NAMESPACE(blitz) template class blitz_interface{ public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef blitz::Array gene_matrix; typedef blitz::Array gene_vector; // typedef blitz::Matrix gene_matrix; // typedef blitz::Vector gene_vector; static inline std::string name() { return "blitz"; } static void free_matrix(gene_matrix & A, int N){} static void free_vector(gene_vector & B){} static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl[0].size(),A_stl.size()); for (int j=0; j(source); // for (int i=0;i(source); cible = source; } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blitz/btl_blitz.cpp ================================================ //===================================================== // File : main.cpp // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "blitz_interface.hh" #include "blitz_LU_solve_interface.hh" #include "bench.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_axpy.hh" #include "action_lu_solve.hh" #include "action_ata_product.hh" #include "action_aat_product.hh" #include "action_atv_product.hh" BTL_MAIN; int main() { bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); //bench > >(MIN_LU,MAX_LU,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blitz/btl_tiny_blitz.cpp ================================================ //===================================================== // File : main.cpp // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "tiny_blitz_interface.hh" #include "static/bench_static.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_axpy.hh" BTL_MAIN; int main() { bench_static(); bench_static(); bench_static(); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/blitz/tiny_blitz_interface.hh ================================================ //===================================================== // File : tiny_blitz_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef TINY_BLITZ_INTERFACE_HH #define TINY_BLITZ_INTERFACE_HH #include "blitz/array.h" #include "blitz/tiny.h" #include "blitz/tinymat.h" #include "blitz/tinyvec.h" #include #include BZ_USING_NAMESPACE(blitz) template class tiny_blitz_interface { public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef TinyVector gene_vector; typedef TinyMatrix gene_matrix; static inline std::string name() { return "tiny_blitz"; } static void free_matrix(gene_matrix & A, int N){} static void free_vector(gene_vector & B){} static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ for (int j=0; j //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "eigen3_interface.hh" #include "static/bench_static.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_axpy.hh" #include "action_lu_solve.hh" #include "action_ata_product.hh" #include "action_aat_product.hh" #include "action_atv_product.hh" #include "action_cholesky.hh" #include "action_trisolve.hh" BTL_MAIN; int main() { bench_static(); bench_static(); bench_static(); bench_static(); bench_static(); bench_static(); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen2/eigen2_interface.hh ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef EIGEN2_INTERFACE_HH #define EIGEN2_INTERFACE_HH // #include #include #include #include #include #include #include "btl.hh" using namespace Eigen; template class eigen2_interface { public : enum {IsFixedSize = (SIZE!=Dynamic)}; typedef real real_type; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef Eigen::Matrix gene_matrix; typedef Eigen::Matrix gene_vector; static inline std::string name( void ) { #if defined(EIGEN_VECTORIZE_SSE) if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2"; #elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2"; #else if (SIZE==Dynamic) return "eigen2_novec"; else return "tiny_eigen2_novec"; #endif } static void free_matrix(gene_matrix & A, int N) {} static void free_vector(gene_vector & B) {} static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl[0].size(), A_stl.size()); for (int j=0; j().solveTriangular(B); } static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){ X = L.template marked().solveTriangular(B); } static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ C = X.llt().matrixL(); // C = X; // Cholesky::computeInPlace(C); // Cholesky::computeInPlaceBlock(C); } static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ C = X.lu().matrixLU(); // C = X.inverse(); } static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){ C = Tridiagonalization(X).packedMatrix(); } static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){ C = HessenbergDecomposition(X).packedMatrix(); } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen2/main_adv.cpp ================================================ //===================================================== // Copyright 
(C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "eigen2_interface.hh" #include "bench.hh" #include "action_trisolve.hh" #include "action_trisolve_matrix.hh" #include "action_cholesky.hh" #include "action_hessenberg.hh" #include "action_lu_decomp.hh" // #include "action_partial_lu.hh" BTL_MAIN; int main() { bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen2/main_linear.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "eigen2_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen2/main_matmat.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "eigen2_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen2/main_vecmat.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "eigen2_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); // bench > >(MIN_MV,MAX_MV,NB_POINT); // bench > >(MIN_MV,MAX_MV,NB_POINT); // bench > >(MIN_MV,MAX_MV,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/btl_tiny_eigen3.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "eigen3_interface.hh" #include "static/bench_static.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_axpy.hh" #include "action_lu_solve.hh" #include "action_ata_product.hh" #include "action_aat_product.hh" #include "action_atv_product.hh" #include "action_cholesky.hh" #include "action_trisolve.hh" BTL_MAIN; int main() { bench_static(); bench_static(); bench_static(); bench_static(); bench_static(); bench_static(); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/eigen3_interface.hh ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef EIGEN3_INTERFACE_HH #define EIGEN3_INTERFACE_HH #include #include #include "btl.hh" using namespace Eigen; template class eigen3_interface { public : enum {IsFixedSize = (SIZE!=Dynamic)}; typedef real real_type; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef Eigen::Matrix gene_matrix; typedef Eigen::Matrix gene_vector; static inline std::string name( void ) { return EIGEN_MAKESTRING(BTL_PREFIX); } static void free_matrix(gene_matrix & /*A*/, int /*N*/) {} static void free_vector(gene_vector & /*B*/) {} static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl[0].size(), A_stl.size()); for (unsigned int j=0; j().setZero(); X.template selfadjointView().rankUpdate(A.transpose()); } static inline void aat_product(const gene_matrix & A, gene_matrix & X, int /*N*/){ X.template triangularView().setZero(); X.template selfadjointView().rankUpdate(A); } static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){ X.noalias() = A*B; } static inline void symv(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){ X.noalias() = (A.template selfadjointView() * B); // internal::product_selfadjoint_vector(N,A.data(),N, B.data(), 1, X.data(), 1); } template static void triassign(Dest& dst, const Src& src) { typedef typename Dest::Scalar Scalar; typedef typename internal::packet_traits::type Packet; const int PacketSize = sizeof(Packet)/sizeof(Scalar); int size = dst.cols(); for(int j=0; j(j, index, src); else dst.template copyPacket(index, j, src); } // do the non-vectorizable part of the assignment for (int index = alignedEnd; index(N,A.data(),N, X.data(), 1, Y.data(), 1, -1); for(int j=0; j(c,s)); } static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int /*N*/){ X.noalias() = (A.transpose()*B); } static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int /*N*/){ Y += coef * X; } 
static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int /*N*/){ Y = a*X + b*Y; } static EIGEN_DONT_INLINE void copy_matrix(const gene_matrix & source, gene_matrix & cible, int /*N*/){ cible = source; } static EIGEN_DONT_INLINE void copy_vector(const gene_vector & source, gene_vector & cible, int /*N*/){ cible = source; } static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector& X, int /*N*/){ X = L.template triangularView().solve(B); } static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int /*N*/){ X = L.template triangularView().solve(B); } static inline void trmm(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int /*N*/){ X.noalias() = L.template triangularView() * B; } static inline void cholesky(const gene_matrix & X, gene_matrix & C, int /*N*/){ C = X; internal::llt_inplace::blocked(C); //C = X.llt().matrixL(); // C = X; // Cholesky::computeInPlace(C); // Cholesky::computeInPlaceBlock(C); } static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int /*N*/){ C = X.fullPivLu().matrixLU(); } static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ Matrix piv(N); DenseIndex nb; C = X; internal::partial_lu_inplace(C,piv,nb); // C = X.partialPivLu().matrixLU(); } static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){ typename Tridiagonalization::CoeffVectorType aux(N-1); C = X; internal::tridiagonalization_inplace(C, aux); } static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int /*N*/){ C = HessenbergDecomposition(X).packedMatrix(); } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/main_adv.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud 
//===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "eigen3_interface.hh" #include "bench.hh" #include "action_trisolve.hh" #include "action_trisolve_matrix.hh" #include "action_cholesky.hh" #include "action_hessenberg.hh" #include "action_lu_decomp.hh" #include "action_partial_lu.hh" BTL_MAIN; int main() { bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); // bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); // bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_LU,MAX_LU,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/main_linear.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. 
// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "eigen3_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/main_matmat.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "eigen3_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/eigen3/main_vecmat.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "eigen3_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/gmm/gmm_LU_solve_interface.hh ================================================ //===================================================== // File : blitz_LU_solve_interface.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:31 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef BLITZ_LU_SOLVE_INTERFACE_HH #define BLITZ_LU_SOLVE_INTERFACE_HH #include "blitz/array.h" #include BZ_USING_NAMESPACE(blitz) template class blitz_LU_solve_interface : public blitz_interface { public : typedef typename blitz_interface::gene_matrix gene_matrix; typedef typename blitz_interface::gene_vector gene_vector; typedef blitz::Array Pivot_Vector; inline static void new_Pivot_Vector(Pivot_Vector & pivot,int N) { pivot.resize(N); } inline static void free_Pivot_Vector(Pivot_Vector & pivot) { return; } static inline real matrix_vector_product_sliced(const gene_matrix & A, gene_vector B, int row, int col_start, int col_end) { real somme=0.; for (int j=col_start ; j=big ) big = abs( LU( i, j ) ) ; } if( big==0. 
) { INFOS( "blitz_LU_factor::Singular matrix" ) ; exit( 0 ) ; } ImplicitScaling( i ) = 1./big ; } // Loop over columns of Crout's method : for( int j=0; j=big ) { dum = ImplicitScaling( i )*abs( theSum ) ; big = dum ; index_max = i ; } } // Interchanging rows and the scale factor : if( j!=index_max ) { for( int k=0; k=0; i-- ) { theSum = X( i ) ; // theSum = B( i ) ; theSum -= matrix_vector_product_sliced(LU, X, i, i+1, N) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*X( Range( i+1, toEnd ) ) ) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*B( Range( i+1, toEnd ) ) ) ; // Store a component of the solution vector : X( i ) = theSum/LU( i, i ) ; // B( i ) = theSum/LU( i, i ) ; } } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/gmm/gmm_interface.hh ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef GMM_INTERFACE_HH #define GMM_INTERFACE_HH #include #include using namespace gmm; template class gmm_interface { public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef gmm::dense_matrix gene_matrix; typedef stl_vector gene_vector; static inline std::string name( void ) { return "gmm"; } static void free_matrix(gene_matrix & A, int N){ return ; } static void free_vector(gene_vector & B){ return ; } static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl[0].size(),A_stl.size()); for (int j=0; j ipvt(N); gmm::lu_factor(R, ipvt); } static inline void hessenberg(const gene_matrix & X, gene_matrix & R, int N){ gmm::copy(X,R); gmm::Hessenberg_reduction(R,X,false); } static inline void tridiagonalization(const gene_matrix & X, gene_matrix & R, int N){ gmm::copy(X,R); gmm::Householder_tridiagonalization(R,X,false); } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/gmm/main.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #include "utilities.h" #include "gmm_interface.hh" #include "bench.hh" #include "basic_actions.hh" #include "action_hessenberg.hh" #include "action_partial_lu.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); //bench > >(MIN_LU,MAX_LU,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/mtl4/.kdbgrc.main ================================================ [General] DebuggerCmdStr= DriverName=GDB FileVersion=1 OptionsSelected= ProgramArgs= TTYLevel=7 WorkingDirectory= [Memory] ColumnWidths=80,0 NumExprs=0 ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/mtl4/main.cpp ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "mtl4_interface.hh" #include "bench.hh" #include "basic_actions.hh" #include "action_cholesky.hh" // #include "action_lu_decomp.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/mtl4/mtl4_LU_solve_interface.hh ================================================ //===================================================== // File : blitz_LU_solve_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:31 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef BLITZ_LU_SOLVE_INTERFACE_HH #define BLITZ_LU_SOLVE_INTERFACE_HH #include "blitz/array.h" #include BZ_USING_NAMESPACE(blitz) template class blitz_LU_solve_interface : public blitz_interface { public : typedef typename blitz_interface::gene_matrix gene_matrix; typedef typename blitz_interface::gene_vector gene_vector; typedef blitz::Array Pivot_Vector; inline static void new_Pivot_Vector(Pivot_Vector & pivot,int N) { pivot.resize(N); } inline static void free_Pivot_Vector(Pivot_Vector & pivot) { return; } static inline real matrix_vector_product_sliced(const gene_matrix & A, gene_vector B, int row, int col_start, int col_end) { real somme=0.; for (int j=col_start ; j=big ) big = abs( LU( i, j ) ) ; } if( big==0. ) { INFOS( "blitz_LU_factor::Singular matrix" ) ; exit( 0 ) ; } ImplicitScaling( i ) = 1./big ; } // Loop over columns of Crout's method : for( int j=0; j=big ) { dum = ImplicitScaling( i )*abs( theSum ) ; big = dum ; index_max = i ; } } // Interchanging rows and the scale factor : if( j!=index_max ) { for( int k=0; k=0; i-- ) { theSum = X( i ) ; // theSum = B( i ) ; theSum -= matrix_vector_product_sliced(LU, X, i, i+1, N) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*X( Range( i+1, toEnd ) ) ) ; // theSum -= sum( LU( i, Range( i+1, toEnd ) )*B( Range( i+1, toEnd ) ) ) ; // Store a component of the solution vector : X( i ) = theSum/LU( i, i ) ; // B( i ) = theSum/LU( i, i ) ; } } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/mtl4/mtl4_interface.hh ================================================ //===================================================== // Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at 
your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef MTL4_INTERFACE_HH #define MTL4_INTERFACE_HH #include #include // #include #include using namespace mtl; template class mtl4_interface { public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef mtl::dense2D > gene_matrix; typedef mtl::dense_vector gene_vector; static inline std::string name() { return "mtl4"; } static void free_matrix(gene_matrix & A, int N){ return ; } static void free_vector(gene_vector & B){ return ; } static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.change_dim(A_stl[0].size(), A_stl.size()); for (int j=0; j C(N,N); // C = B; // X = (A*C); } static inline void transposed_matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int N){ X = (trans(A)*trans(B)); } // static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){ // X = (trans(A)*A); // } static inline void aat_product(const gene_matrix & A, gene_matrix & X, int N){ X = (A*trans(A)); } static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ X = (A*B); } static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ X = (trans(A)*B); } static inline void axpy(const real coef, const gene_vector & X, gene_vector & Y, int N){ Y += coef * X; } static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int N){ Y = a*X + b*Y; } // static inline void cholesky(const gene_matrix & X, 
gene_matrix & C, int N){ // C = X; // recursive_cholesky(C); // } // static inline void lu_decomp(const gene_matrix & X, gene_matrix & R, int N){ // R = X; // std::vector ipvt(N); // lu_factor(R, ipvt); // } static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){ X = lower_trisolve(L, B); } static inline void copy_matrix(const gene_matrix & source, gene_matrix & cible, int N){ cible = source; } static inline void copy_vector(const gene_vector & source, gene_vector & cible, int N){ cible = source; } }; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tensors/main_linear.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "utilities.h" #include "tensor_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tensors/main_matmat.cpp ================================================ //===================================================== // Copyright (C) 2014 Benoit Steiner //===================================================== // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
// #include "utilities.h" #include "tensor_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tensors/main_vecmat.cpp ================================================ //===================================================== // Copyright (C) 2014 Benoit Steiner //===================================================== // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // #include "utilities.h" #include "tensor_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_MV,MAX_MV,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tensors/tensor_interface.hh ================================================ //===================================================== // Copyright (C) 2014 Benoit Steiner //===================================================== // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
// #ifndef TENSOR_INTERFACE_HH #define TENSOR_INTERFACE_HH #include #include #include "btl.hh" using namespace Eigen; template class tensor_interface { public : typedef real real_type; typedef typename Eigen::Tensor::Index Index; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef Eigen::Tensor gene_matrix; typedef Eigen::Tensor gene_vector; static inline std::string name( void ) { return EIGEN_MAKESTRING(BTL_PREFIX); } static void free_matrix(gene_matrix & /*A*/, int /*N*/) {} static void free_vector(gene_vector & /*B*/) {} static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(Eigen::array(A_stl[0].size(), A_stl.size())); for (unsigned int j=0; j(i,j)) = A_stl[j][i]; } } } static BTL_DONT_INLINE void vector_from_stl(gene_vector & B, stl_vector & B_stl){ B.resize(B_stl.size()); for (unsigned int i=0; i(i,j)); } } } static inline void matrix_matrix_product(const gene_matrix & A, const gene_matrix & B, gene_matrix & X, int /*N*/){ typedef typename Eigen::Tensor::DimensionPair DimPair; const Eigen::array dims(DimPair(1, 0)); X/*.noalias()*/ = A.contract(B, dims); } static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int /*N*/){ typedef typename Eigen::Tensor::DimensionPair DimPair; const Eigen::array dims(DimPair(1, 0)); X/*.noalias()*/ = A.contract(B, dims); } static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int /*N*/){ Y += X.constant(coef) * X; } static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int /*N*/){ Y = X.constant(a)*X + Y.constant(b)*Y; } static EIGEN_DONT_INLINE void copy_matrix(const gene_matrix & source, gene_matrix & cible, int /*N*/){ cible = source; } static EIGEN_DONT_INLINE void copy_vector(const gene_vector & source, gene_vector & cible, int /*N*/){ cible = source; } }; #endif ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tvmet/main.cpp ================================================ //===================================================== // File : main.cpp // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "tvmet_interface.hh" #include "static/bench_static.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_atv_product.hh" #include "action_axpy.hh" BTL_MAIN; int main() { bench_static(); bench_static(); bench_static(); bench_static(); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/tvmet/tvmet_interface.hh ================================================ //===================================================== // File : tvmet_interface.hh // Author : L. 
Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:30 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #ifndef TVMET_INTERFACE_HH #define TVMET_INTERFACE_HH #include #include #include #include using namespace tvmet; template class tvmet_interface{ public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef Vector gene_vector; typedef Matrix gene_matrix; static inline std::string name() { return "tiny_tvmet"; } static void free_matrix(gene_matrix & A, int N){} static void free_vector(gene_vector & B){} static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ for (int j=0; j // Copyright (C) EDF R&D, lun sep 30 14:23:27 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" #include "ublas_interface.hh" #include "bench.hh" #include "basic_actions.hh" BTL_MAIN; int main() { bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); // bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/btl/libs/ublas/ublas_interface.hh ================================================ //===================================================== // File : ublas_interface.hh // Author : L. Plagne // Copyright (C) EDF R&D, lun sep 30 14:23:27 CEST 2002 //===================================================== // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
// #ifndef UBLAS_INTERFACE_HH #define UBLAS_INTERFACE_HH #include #include #include #include using namespace boost::numeric; template class ublas_interface{ public : typedef real real_type ; typedef std::vector stl_vector; typedef std::vector stl_matrix; typedef typename boost::numeric::ublas::matrix gene_matrix; typedef typename boost::numeric::ublas::vector gene_vector; static inline std::string name( void ) { return "ublas"; } static void free_matrix(gene_matrix & A, int N) {} static void free_vector(gene_vector & B) {} static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ A.resize(A_stl.size(),A_stl[0].size()); for (int j=0; j #include "../Eigen/Core" using namespace Eigen; using namespace std; #define DUMP_CPUID(CODE) {\ int abcd[4]; \ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;\ EIGEN_CPUID(abcd, CODE, 0); \ std::cout << "The code " << CODE << " gives " \ << (int*)(abcd[0]) << " " << (int*)(abcd[1]) << " " \ << (int*)(abcd[2]) << " " << (int*)(abcd[3]) << " " << std::endl; \ } int main() { cout << "Eigen's L1 = " << internal::queryL1CacheSize() << endl; cout << "Eigen's L2/L3 = " << internal::queryTopLevelCacheSize() << endl; int l1, l2, l3; internal::queryCacheSizes(l1, l2, l3); cout << "Eigen's L1, L2, L3 = " << l1 << " " << l2 << " " << l3 << endl; #ifdef EIGEN_CPUID int abcd[4]; int string[8]; char* string_char = (char*)(string); // vendor ID EIGEN_CPUID(abcd,0x0,0); string[0] = abcd[1]; string[1] = abcd[3]; string[2] = abcd[2]; string[3] = 0; cout << endl; cout << "vendor id = " << string_char << endl; cout << endl; int max_funcs = abcd[0]; internal::queryCacheSizes_intel_codes(l1, l2, l3); cout << "Eigen's intel codes L1, L2, L3 = " << l1 << " " << l2 << " " << l3 << endl; if(max_funcs>=4) { internal::queryCacheSizes_intel_direct(l1, l2, l3); cout << "Eigen's intel direct L1, L2, L3 = " << l1 << " " << l2 << " " << l3 << endl; } internal::queryCacheSizes_amd(l1, l2, l3); cout << "Eigen's amd L1, L2, L3 = " << l1 << " " << l2 << " " << 
l3 << endl; cout << endl; // dump Intel direct method if(max_funcs>=4) { l1 = l2 = l3 = 0; int cache_id = 0; int cache_type = 0; do { abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; EIGEN_CPUID(abcd,0x4,cache_id); cache_type = (abcd[0] & 0x0F) >> 0; int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5] int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22] int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12] int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0] int sets = (abcd[2]); // C[31:0] int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1); cout << "cache[" << cache_id << "].type = " << cache_type << "\n"; cout << "cache[" << cache_id << "].level = " << cache_level << "\n"; cout << "cache[" << cache_id << "].ways = " << ways << "\n"; cout << "cache[" << cache_id << "].partitions = " << partitions << "\n"; cout << "cache[" << cache_id << "].line_size = " << line_size << "\n"; cout << "cache[" << cache_id << "].sets = " << sets << "\n"; cout << "cache[" << cache_id << "].size = " << cache_size << "\n"; cache_id++; } while(cache_type>0 && cache_id<16); } // dump everything std::cout << endl <<"Raw dump:" << endl; for(int i=0; i #include "BenchTimer.h" #include #include #include #include #include using namespace Eigen; std::map > results; std::vector labels; std::vector sizes; template EIGEN_DONT_INLINE void compute_norm_equation(Solver &solver, const MatrixType &A) { if(A.rows()!=A.cols()) solver.compute(A.transpose()*A); else solver.compute(A); } template EIGEN_DONT_INLINE void compute(Solver &solver, const MatrixType &A) { solver.compute(A); } template void bench(int id, int rows, int size = Size) { typedef Matrix Mat; typedef Matrix MatDyn; typedef Matrix MatSquare; Mat A(rows,size); A.setRandom(); if(rows==size) A = A*A.adjoint(); BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_cod, t_fpqr, t_jsvd, t_bdcsvd; int svd_opt = ComputeThinU|ComputeThinV; int tries = 5; int rep = 1000/size; if(rep==0) rep = 1; // rep = rep*rep; LLT llt(size); 
LDLT ldlt(size); PartialPivLU lu(size); FullPivLU fplu(size,size); HouseholderQR qr(A.rows(),A.cols()); ColPivHouseholderQR cpqr(A.rows(),A.cols()); CompleteOrthogonalDecomposition cod(A.rows(),A.cols()); FullPivHouseholderQR fpqr(A.rows(),A.cols()); JacobiSVD jsvd(A.rows(),A.cols()); BDCSVD bdcsvd(A.rows(),A.cols()); BENCH(t_llt, tries, rep, compute_norm_equation(llt,A)); BENCH(t_ldlt, tries, rep, compute_norm_equation(ldlt,A)); BENCH(t_lu, tries, rep, compute_norm_equation(lu,A)); if(size<=1000) BENCH(t_fplu, tries, rep, compute_norm_equation(fplu,A)); BENCH(t_qr, tries, rep, compute(qr,A)); BENCH(t_cpqr, tries, rep, compute(cpqr,A)); BENCH(t_cod, tries, rep, compute(cod,A)); if(size*rows<=10000000) BENCH(t_fpqr, tries, rep, compute(fpqr,A)); if(size<500) // JacobiSVD is really too slow for too large matrices BENCH(t_jsvd, tries, rep, jsvd.compute(A,svd_opt)); // if(size*rows<=20000000) BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A,svd_opt)); results["LLT"][id] = t_llt.best(); results["LDLT"][id] = t_ldlt.best(); results["PartialPivLU"][id] = t_lu.best(); results["FullPivLU"][id] = t_fplu.best(); results["HouseholderQR"][id] = t_qr.best(); results["ColPivHouseholderQR"][id] = t_cpqr.best(); results["CompleteOrthogonalDecomposition"][id] = t_cod.best(); results["FullPivHouseholderQR"][id] = t_fpqr.best(); results["JacobiSVD"][id] = t_jsvd.best(); results["BDCSVD"][id] = t_bdcsvd.best(); } int main() { labels.push_back("LLT"); labels.push_back("LDLT"); labels.push_back("PartialPivLU"); labels.push_back("FullPivLU"); labels.push_back("HouseholderQR"); labels.push_back("ColPivHouseholderQR"); labels.push_back("CompleteOrthogonalDecomposition"); labels.push_back("FullPivHouseholderQR"); labels.push_back("JacobiSVD"); labels.push_back("BDCSVD"); for(int i=0; i(k,sizes[k](0),sizes[k](1)); } cout.width(32); cout << "solver/size"; cout << " "; for(int k=0; k=1e6) cout << "-"; else cout << r(k); cout << " "; } cout << endl; } // HTML output cout << "" << endl; cout << "" << 
endl; for(int k=0; k" << sizes[k](0) << "x" << sizes[k](1) << ""; cout << "" << endl; for(int i=0; i"; ArrayXf r = (results[labels[i]]*100000.f).floor()/100.f; for(int k=0; k=1e6) cout << ""; else { cout << ""; } } cout << "" << endl; } cout << "
solver/size
" << labels[i] << "-" << r(k); if(i>0) cout << " (x" << numext::round(10.f*results[labels[i]](k)/results["LLT"](k))/10.f << ")"; if(i<4 && sizes[k](0)!=sizes[k](1)) cout << " *"; cout << "
" << endl; // cout << "LLT (ms) " << (results["LLT"]*1000.).format(fmt) << "\n"; // cout << "LDLT (%) " << (results["LDLT"]/results["LLT"]).format(fmt) << "\n"; // cout << "PartialPivLU (%) " << (results["PartialPivLU"]/results["LLT"]).format(fmt) << "\n"; // cout << "FullPivLU (%) " << (results["FullPivLU"]/results["LLT"]).format(fmt) << "\n"; // cout << "HouseholderQR (%) " << (results["HouseholderQR"]/results["LLT"]).format(fmt) << "\n"; // cout << "ColPivHouseholderQR (%) " << (results["ColPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n"; // cout << "CompleteOrthogonalDecomposition (%) " << (results["CompleteOrthogonalDecomposition"]/results["LLT"]).format(fmt) << "\n"; // cout << "FullPivHouseholderQR (%) " << (results["FullPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n"; // cout << "JacobiSVD (%) " << (results["JacobiSVD"]/results["LLT"]).format(fmt) << "\n"; // cout << "BDCSVD (%) " << (results["BDCSVD"]/results["LLT"]).format(fmt) << "\n"; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/eig33.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // The computeRoots function included in this is based on materials // covered by the following copyright and license: // // Geometric Tools, LLC // Copyright (c) 1998-2010 // Distributed under the Boost Software License, Version 1.0. 
// // Permission is hereby granted, free of charge, to any person or organization // obtaining a copy of the software and accompanying documentation covered by // this license (the "Software") to use, reproduce, display, distribute, // execute, and transmit the Software, and to prepare derivative works of the // Software, and to permit third-parties to whom the Software is furnished to // do so, all subject to the following: // // The copyright notices in the Software and this entire statement, including // the above license grant, this restriction and the following disclaimer, // must be included in all copies of the Software, in whole or in part, and // all derivative works of the Software, unless such copies or derivative // works are solely in the form of machine-executable object code generated by // a source language processor. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT // SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE // FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. #include #include #include #include #include using namespace Eigen; using namespace std; template inline void computeRoots(const Matrix& m, Roots& roots) { typedef typename Matrix::Scalar Scalar; const Scalar s_inv3 = 1.0/3.0; const Scalar s_sqrt3 = std::sqrt(Scalar(3.0)); // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The // eigenvalues are the roots to this equation, all guaranteed to be // real-valued, because the matrix is symmetric. 
Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(0,1)*m(0,2)*m(1,2) - m(0,0)*m(1,2)*m(1,2) - m(1,1)*m(0,2)*m(0,2) - m(2,2)*m(0,1)*m(0,1); Scalar c1 = m(0,0)*m(1,1) - m(0,1)*m(0,1) + m(0,0)*m(2,2) - m(0,2)*m(0,2) + m(1,1)*m(2,2) - m(1,2)*m(1,2); Scalar c2 = m(0,0) + m(1,1) + m(2,2); // Construct the parameters used in classifying the roots of the equation // and in solving the equation for the roots in closed form. Scalar c2_over_3 = c2*s_inv3; Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3; if (a_over_3 > Scalar(0)) a_over_3 = Scalar(0); Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1)); Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3; if (q > Scalar(0)) q = Scalar(0); // Compute the eigenvalues by solving for the roots of the polynomial. Scalar rho = std::sqrt(-a_over_3); Scalar theta = std::atan2(std::sqrt(-q),half_b)*s_inv3; Scalar cos_theta = std::cos(theta); Scalar sin_theta = std::sin(theta); roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta; roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); } template void eigen33(const Matrix& mat, Matrix& evecs, Vector& evals) { typedef typename Matrix::Scalar Scalar; // Scale the matrix so its entries are in [-1,1]. The scaling is applied // only when at least one matrix entry has magnitude larger than 1. Scalar shift = mat.trace()/3; Matrix scaledMat = mat; scaledMat.diagonal().array() -= shift; Scalar scale = scaledMat.cwiseAbs()/*.template triangularView()*/.maxCoeff(); scale = std::max(scale,Scalar(1)); scaledMat/=scale; // Compute the eigenvalues // scaledMat.setZero(); computeRoots(scaledMat,evals); // compute the eigen vectors // **here we assume 3 different eigenvalues** // "optimized version" which appears to be slower with gcc! 
// Vector base; // Scalar alpha, beta; // base << scaledMat(1,0) * scaledMat(2,1), // scaledMat(1,0) * scaledMat(2,0), // -scaledMat(1,0) * scaledMat(1,0); // for(int k=0; k<2; ++k) // { // alpha = scaledMat(0,0) - evals(k); // beta = scaledMat(1,1) - evals(k); // evecs.col(k) = (base + Vector(-beta*scaledMat(2,0), -alpha*scaledMat(2,1), alpha*beta)).normalized(); // } // evecs.col(2) = evecs.col(0).cross(evecs.col(1)).normalized(); // // naive version // Matrix tmp; // tmp = scaledMat; // tmp.diagonal().array() -= evals(0); // evecs.col(0) = tmp.row(0).cross(tmp.row(1)).normalized(); // // tmp = scaledMat; // tmp.diagonal().array() -= evals(1); // evecs.col(1) = tmp.row(0).cross(tmp.row(1)).normalized(); // // tmp = scaledMat; // tmp.diagonal().array() -= evals(2); // evecs.col(2) = tmp.row(0).cross(tmp.row(1)).normalized(); // a more stable version: if((evals(2)-evals(0))<=Eigen::NumTraits::epsilon()) { evecs.setIdentity(); } else { Matrix tmp; tmp = scaledMat; tmp.diagonal ().array () -= evals (2); evecs.col (2) = tmp.row (0).cross (tmp.row (1)).normalized (); tmp = scaledMat; tmp.diagonal ().array () -= evals (1); evecs.col(1) = tmp.row (0).cross(tmp.row (1)); Scalar n1 = evecs.col(1).norm(); if(n1<=Eigen::NumTraits::epsilon()) evecs.col(1) = evecs.col(2).unitOrthogonal(); else evecs.col(1) /= n1; // make sure that evecs[1] is orthogonal to evecs[2] evecs.col(1) = evecs.col(2).cross(evecs.col(1).cross(evecs.col(2))).normalized(); evecs.col(0) = evecs.col(2).cross(evecs.col(1)); } // Rescale back to the original size. 
evals *= scale; evals.array()+=shift; } int main() { BenchTimer t; int tries = 10; int rep = 400000; typedef Matrix3d Mat; typedef Vector3d Vec; Mat A = Mat::Random(3,3); A = A.adjoint() * A; // Mat Q = A.householderQr().householderQ(); // A = Q * Vec(2.2424567,2.2424566,7.454353).asDiagonal() * Q.transpose(); SelfAdjointEigenSolver eig(A); BENCH(t, tries, rep, eig.compute(A)); std::cout << "Eigen iterative: " << t.best() << "s\n"; BENCH(t, tries, rep, eig.computeDirect(A)); std::cout << "Eigen direct : " << t.best() << "s\n"; Mat evecs; Vec evals; BENCH(t, tries, rep, eigen33(A,evecs,evals)); std::cout << "Direct: " << t.best() << "s\n\n"; // std::cerr << "Eigenvalue/eigenvector diffs:\n"; // std::cerr << (evals - eig.eigenvalues()).transpose() << "\n"; // for(int k=0;k<3;++k) // if(evecs.col(k).dot(eig.eigenvectors().col(k))<0) // evecs.col(k) = -evecs.col(k); // std::cerr << evecs - eig.eigenvectors() << "\n\n"; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/geometry.cpp ================================================ #include #include #include using namespace std; using namespace Eigen; #ifndef SCALAR #define SCALAR float #endif #ifndef SIZE #define SIZE 8 #endif typedef SCALAR Scalar; typedef NumTraits::Real RealScalar; typedef Matrix A; typedef Matrix B; typedef Matrix C; typedef Matrix M; template EIGEN_DONT_INLINE void transform(const Transformation& t, Data& data) { EIGEN_ASM_COMMENT("begin"); data = t * data; EIGEN_ASM_COMMENT("end"); } template EIGEN_DONT_INLINE void transform(const Quaternion& t, Data& data) { EIGEN_ASM_COMMENT("begin quat"); for(int i=0;i struct ToRotationMatrixWrapper { enum {Dim = T::Dim}; typedef typename T::Scalar Scalar; ToRotationMatrixWrapper(const T& o) : object(o) {} T object; }; template EIGEN_DONT_INLINE void transform(const ToRotationMatrixWrapper& t, Data& data) { EIGEN_ASM_COMMENT("begin quat via mat"); data = t.object.toRotationMatrix() * data; 
EIGEN_ASM_COMMENT("end quat via mat"); } template EIGEN_DONT_INLINE void transform(const Transform& t, Data& data) { data = (t * data.colwise().homogeneous()).template block(0,0); } template struct get_dim { enum { Dim = T::Dim }; }; template struct get_dim > { enum { Dim = R }; }; template struct bench_impl { static EIGEN_DONT_INLINE void run(const Transformation& t) { Matrix::Dim,N> data; data.setRandom(); bench_impl::run(t); BenchTimer timer; BENCH(timer,10,100000,transform(t,data)); cout.width(9); cout << timer.best() << " "; } }; template struct bench_impl { static EIGEN_DONT_INLINE void run(const Transformation&) {} }; template EIGEN_DONT_INLINE void bench(const std::string& msg, const Transformation& t) { cout << msg << " "; bench_impl::run(t); std::cout << "\n"; } int main(int argc, char ** argv) { Matrix mat34; mat34.setRandom(); Transform iso3(mat34); Transform aff3(mat34); Transform caff3(mat34); Transform proj3(mat34); Quaternion quat;quat.setIdentity(); ToRotationMatrixWrapper > quatmat(quat); Matrix mat33; mat33.setRandom(); cout.precision(4); std::cout << "N "; for(int i=0;i #include #include #include #include "eigen_src/Eigen/Core" #include "../BenchTimer.h" using namespace Eigen; #ifndef SCALAR #error SCALAR must be defined #endif typedef SCALAR Scalar; typedef Matrix Mat; template EIGEN_DONT_INLINE double bench(long m, long n, long k, const Func& f) { Mat A(m,k); Mat B(k,n); Mat C(m,n); A.setRandom(); B.setRandom(); C.setZero(); BenchTimer t; double up = 1e8*4/sizeof(Scalar); double tm0 = 4, tm1 = 10; if(NumTraits::IsComplex) { up /= 4; tm0 = 2; tm1 = 4; } double flops = 2. 
* m * n * k; long rep = std::max(1., std::min(100., up/flops) ); long tries = std::max(tm0, std::min(tm1, up/flops) ); BENCH(t, tries, rep, f(A,B,C)); return 1e-9 * rep * flops / t.best(); } template int main_gemm(int argc, char **argv, const Func& f) { std::vector results; std::string filename = std::string("gemm_settings.txt"); if(argc>1) filename = std::string(argv[1]); std::ifstream settings(filename); long m, n, k; while(settings >> m >> n >> k) { //std::cerr << " Testing " << m << " " << n << " " << k << std::endl; results.push_back( bench(m, n, k, f) ); } std::cout << RowVectorXd::Map(results.data(), results.size()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/gemv.cpp ================================================ #include "gemv_common.h" EIGEN_DONT_INLINE void gemv(const Mat &A, const Vec &B, Vec &C) { C.noalias() += A * B; } int main(int argc, char **argv) { return main_gemv(argc, argv, gemv); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/gemv_common.h ================================================ #include #include #include #include #include #include "eigen_src/Eigen/Core" #include "../BenchTimer.h" using namespace Eigen; #ifndef SCALAR #error SCALAR must be defined #endif typedef SCALAR Scalar; typedef Matrix Mat; typedef Matrix Vec; template EIGEN_DONT_INLINE double bench(long m, long n, Func &f) { Mat A(m,n); Vec B(n); Vec C(m); A.setRandom(); B.setRandom(); C.setRandom(); BenchTimer t; double up = 1e8/sizeof(Scalar); double tm0 = 4, tm1 = 10; if(NumTraits::IsComplex) { up /= 4; tm0 = 2; tm1 = 4; } double flops = 2. 
* m * n; long rep = std::max(1., std::min(100., up/flops) ); long tries = std::max(tm0, std::min(tm1, up/flops) ); BENCH(t, tries, rep, f(A,B,C)); return 1e-9 * rep * flops / t.best(); } template int main_gemv(int argc, char **argv, Func& f) { std::vector results; std::string filename = std::string("gemv_settings.txt"); if(argc>1) filename = std::string(argv[1]); std::ifstream settings(filename); long m, n; while(settings >> m >> n) { //std::cerr << " Testing " << m << " " << n << std::endl; results.push_back( bench(m, n, f) ); } std::cout << RowVectorXd::Map(results.data(), results.size()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/gemvt.cpp ================================================ #include "gemv_common.h" EIGEN_DONT_INLINE void gemv(const Mat &A, Vec &B, const Vec &C) { B.noalias() += A.transpose() * C; } int main(int argc, char **argv) { return main_gemv(argc, argv, gemv); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/lazy_gemm.cpp ================================================ #include #include #include #include #include "../../BenchTimer.h" using namespace Eigen; #ifndef SCALAR #error SCALAR must be defined #endif typedef SCALAR Scalar; template EIGEN_DONT_INLINE void lazy_gemm(const MatA &A, const MatB &B, MatC &C) { // escape((void*)A.data()); // escape((void*)B.data()); C.noalias() += A.lazyProduct(B); // escape((void*)C.data()); } template EIGEN_DONT_INLINE double bench() { typedef Matrix MatA; typedef Matrix MatB; typedef Matrix MatC; MatA A(m,k); MatB B(k,n); MatC C(m,n); A.setRandom(); B.setRandom(); C.setZero(); BenchTimer t; double up = 1e7*4/sizeof(Scalar); double tm0 = 10, tm1 = 20; double flops = 2. 
* m * n * k; long rep = std::max(10., std::min(10000., up/flops) ); long tries = std::max(tm0, std::min(tm1, up/flops) ); BENCH(t, tries, rep, lazy_gemm(A,B,C)); return 1e-9 * rep * flops / t.best(); } template double bench_t(int t) { if(t) return bench(); else return bench(); } EIGEN_DONT_INLINE double bench_mnk(int m, int n, int k, int t) { int id = m*10000 + n*100 + k; switch(id) { case 10101 : return bench_t< 1, 1, 1>(t); break; case 20202 : return bench_t< 2, 2, 2>(t); break; case 30303 : return bench_t< 3, 3, 3>(t); break; case 40404 : return bench_t< 4, 4, 4>(t); break; case 50505 : return bench_t< 5, 5, 5>(t); break; case 60606 : return bench_t< 6, 6, 6>(t); break; case 70707 : return bench_t< 7, 7, 7>(t); break; case 80808 : return bench_t< 8, 8, 8>(t); break; case 90909 : return bench_t< 9, 9, 9>(t); break; case 101010 : return bench_t<10,10,10>(t); break; case 111111 : return bench_t<11,11,11>(t); break; case 121212 : return bench_t<12,12,12>(t); break; } return 0; } int main(int argc, char **argv) { std::vector results; std::string filename = std::string("lazy_gemm_settings.txt"); if(argc>1) filename = std::string(argv[1]); std::ifstream settings(filename); long m, n, k, t; while(settings >> m >> n >> k >> t) { //std::cerr << " Testing " << m << " " << n << " " << k << std::endl; results.push_back( bench_mnk(m, n, k, t) ); } std::cout << RowVectorXd::Map(results.data(), results.size()); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/llt.cpp ================================================ #include "gemm_common.h" #include EIGEN_DONT_INLINE void llt(const Mat &A, const Mat &B, Mat &C) { C = A; C.diagonal().array() += 1000; Eigen::internal::llt_inplace::blocked(C); } int main(int argc, char **argv) { return main_gemm(argc, argv, llt); } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/make_plot.sh ================================================ #!/bin/bash # base name of the bench # it reads $1.out # and generates $1.pdf WHAT=$1 bench=$2 settings_file=$3 header="rev " while read line do if [ ! -z '$line' ]; then header="$header \"$line\"" fi done < $settings_file echo $header > $WHAT.out.header cat $WHAT.out >> $WHAT.out.header echo "set title '$WHAT'" > $WHAT.gnuplot echo "set key autotitle columnhead outside " >> $WHAT.gnuplot echo "set xtics rotate 1" >> $WHAT.gnuplot echo "set term pdf color rounded enhanced fontscale 0.35 size 7in,5in" >> $WHAT.gnuplot echo set output "'"$WHAT.pdf"'" >> $WHAT.gnuplot col=`cat $settings_file | wc -l` echo "plot for [col=2:$col+1] '$WHAT.out.header' using 0:col:xticlabels(1) with lines" >> $WHAT.gnuplot echo " " >> $WHAT.gnuplot gnuplot -persist < $WHAT.gnuplot # generate a png file (thumbnail) convert -colors 256 -background white -density 300 -resize 300 -quality 0 $WHAT.pdf -background white -flatten $WHAT.png # clean rm $WHAT.out.header $WHAT.gnuplot # generate html/svg graph echo " " > $WHAT.html cat resources/chart_header.html > $WHAT.html echo 'var customSettings = {"TITLE":"","SUBTITLE":"","XLABEL":"","YLABEL":""};' >> $WHAT.html # 'data' is an array of datasets (i.e. curves), each of which is an object of the form # { # key: , # color: , # values: [{ # r: , # v: # }] # } echo 'var data = [' >> $WHAT.html col=2 while read line do if [ ! -z '$line' ]; then header="$header \"$line\"" echo '{"key":"'$line'","values":[' >> $WHAT.html i=0 while read line2 do if [ ! -z "$line2" ]; then val=`echo $line2 | cut -s -f $col -d ' '` if [ -n "$val" ]; then # skip build failures echo '{"r":'$i',"v":'$val'},' >> $WHAT.html fi fi ((i++)) done < $WHAT.out echo ']},' >> $WHAT.html fi ((col++)) done < $settings_file echo '];' >> $WHAT.html echo 'var changesets = [' >> $WHAT.html while read line2 do if [ ! 
-z '$line2' ]; then echo '"'`echo $line2 | cut -f 1 -d ' '`'",' >> $WHAT.html fi done < $WHAT.out echo '];' >> $WHAT.html echo 'var changesets_details = [' >> $WHAT.html while read line2 do if [ ! -z '$line2' ]; then num=`echo "$line2" | cut -f 1 -d ' '` comment=`grep ":$num" changesets.txt | cut -f 2 -d '#'` echo '"'"$comment"'",' >> $WHAT.html fi done < $WHAT.out echo '];' >> $WHAT.html echo 'var changesets_count = [' >> $WHAT.html i=0 while read line2 do if [ ! -z '$line2' ]; then echo $i ',' >> $WHAT.html fi ((i++)) done < $WHAT.out echo '];' >> $WHAT.html cat resources/chart_footer.html >> $WHAT.html ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/resources/chart_footer.html ================================================ /* setup the chart and its options */ var chart = nv.models.lineChart() .color(d3.scale.category10().range()) .margin({left: 75, bottom: 100}) .forceX([0]).forceY([0]); chart.x(function(datum){ return datum.r; }) .xAxis.options({ axisLabel: customSettings.XLABEL || 'Changeset', tickFormat: d3.format('.0f') }); chart.xAxis .tickValues(changesets_count) .tickFormat(function(d){return changesets[d]}) .rotateLabels(-90); chart.y(function(datum){ return datum.v; }) .yAxis.options({ axisLabel: customSettings.YLABEL || 'GFlops'/*, tickFormat: function(val){ return d3.format('.0f')(val) + ' GFlops'; }*/ }); chart.tooltip.headerFormatter(function(d) { return changesets[d] + '

' + changesets_details[d] + "

"; }); //chart.useInteractiveGuideline(true); d3.select('#chart').datum(data).call(chart); var plot = d3.select('#chart > g'); /* setup the title */ plot.append('text') .style('font-size', '24px') .attr('text-anchor', 'middle').attr('x', '50%').attr('y', '20px') .text(customSettings.TITLE || ''); /* ensure the chart is responsive */ nv.utils.windowResize(chart.update); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/bench/perf_monitoring/resources/chart_header.html ================================================ $treeview $search $mathjax
Please, help us to better know about our user community by answering the following short survey: https://forms.gle/wpyrxWi18ox9Z5ae9
$projectname  $projectnumber
$projectbrief
$projectbrief
$searchbox
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/eigendoxy_layout.xml.in ================================================ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/eigendoxy_tabs.css ================================================ .tabs, .tabs2, .tabs3 { background-image: url('tab_b.png'); width: 100%; z-index: 101; font-size: 13px; } .tabs2 { font-size: 10px; } .tabs3 { font-size: 9px; } .tablist { margin: 0; padding: 0; display: table; } .tablist li { float: left; display: table-cell; background-image: url('tab_b.png'); line-height: 36px; list-style: none; } .tablist a { display: block; padding: 0 20px; font-weight: bold; background-image:url('tab_s.png'); background-repeat:no-repeat; background-position:right; color: #283A5D; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); text-decoration: none; outline: none; } .tabs3 .tablist a { padding: 0 10px; } .tablist a:hover { background-image: url('tab_h.png'); background-repeat:repeat-x; color: #fff; text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); text-decoration: none; } .tablist li.current a { background-image: url('tab_a.png'); background-repeat:repeat-x; color: #fff; text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/.krazy ================================================ EXCLUDE copyright EXCLUDE license ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/CustomizingEigen_Inheritance.cpp ================================================ #include #include class MyVectorType : public Eigen::VectorXd { public: MyVectorType(void):Eigen::VectorXd() {} // This constructor allows you to construct MyVectorType from Eigen expressions template MyVectorType(const Eigen::MatrixBase& other) : Eigen::VectorXd(other) { } // This 
method allows you to assign Eigen expressions to MyVectorType template MyVectorType& operator=(const Eigen::MatrixBase & other) { this->Eigen::VectorXd::operator=(other); return *this; } }; int main() { MyVectorType v = MyVectorType::Ones(4); v(2) += 10; v = 2 * v; std::cout << v.transpose() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Cwise_erf.cpp ================================================ #include #include #include using namespace Eigen; int main() { Array4d v(-0.5,2,0,-7); std::cout << v.erf() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Cwise_erfc.cpp ================================================ #include #include #include using namespace Eigen; int main() { Array4d v(-0.5,2,0,-7); std::cout << v.erfc() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Cwise_lgamma.cpp ================================================ #include #include #include using namespace Eigen; int main() { Array4d v(0.5,10,0,-1); std::cout << v.lgamma() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/DenseBase_middleCols_int.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(1..3,:) =\n" << A.middleCols(1,3) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/DenseBase_middleRows_int.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A 
<< '\n' << endl; cout << "A(2..3,:) =\n" << A.middleRows(2,2) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/DenseBase_template_int_middleCols.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/DenseBase_template_int_middleRows.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main(void) { int const N = 5; MatrixXi A(N,N); A.setRandom(); cout << "A =\n" << A << '\n' << endl; cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/QuickStart_example.cpp ================================================ #include #include using Eigen::MatrixXd; int main() { MatrixXd m(2,2); m(0,0) = 3; m(1,0) = 2.5; m(0,1) = -1; m(1,1) = m(1,0) + m(0,1); std::cout << m << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/QuickStart_example2_dynamic.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { MatrixXd m = MatrixXd::Random(3,3); m = (m + MatrixXd::Constant(3,3,1.2)) * 50; cout << "m =" << endl << m << endl; VectorXd v(3); v << 1, 2, 3; cout << "m * v =" << endl << m * v << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/QuickStart_example2_fixed.cpp ================================================ #include #include using 
namespace Eigen; using namespace std; int main() { Matrix3d m = Matrix3d::Random(); m = (m + Matrix3d::Constant(1.2)) * 50; cout << "m =" << endl << m << endl; Vector3d v(1,2,3); cout << "m * v =" << endl << m * v << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TemplateKeyword_flexible.cpp ================================================ #include #include using namespace Eigen; template void copyUpperTriangularPart(MatrixBase& dst, const MatrixBase& src) { /* Note the 'template' keywords in the following line! */ dst.template triangularView() = src.template triangularView(); } int main() { MatrixXi m1 = MatrixXi::Ones(5,5); MatrixXi m2 = MatrixXi::Random(4,4); std::cout << "m2 before copy:" << std::endl; std::cout << m2 << std::endl << std::endl; copyUpperTriangularPart(m2, m1.topLeftCorner(4,4)); std::cout << "m2 after copy:" << std::endl; std::cout << m2 << std::endl << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TemplateKeyword_simple.cpp ================================================ #include #include using namespace Eigen; void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src) { dst.triangularView() = src.triangularView(); } int main() { MatrixXf m1 = MatrixXf::Ones(4,4); MatrixXf m2 = MatrixXf::Random(4,4); std::cout << "m2 before copy:" << std::endl; std::cout << m2 << std::endl << std::endl; copyUpperTriangularPart(m2, m1); std::cout << "m2 after copy:" << std::endl; std::cout << m2 << std::endl << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialInplaceLU.cpp ================================================ #include struct init { init() { std::cout << "[" << "init" << "]" << std::endl; } }; init init_obj; // [init] #include #include using namespace std; using namespace Eigen; int main() { 
MatrixXd A(2,2); A << 2, -1, 1, 3; cout << "Here is the input matrix A before decomposition:\n" << A << endl; cout << "[init]" << endl; cout << "[declaration]" << endl; PartialPivLU > lu(A); cout << "Here is the input matrix A after decomposition:\n" << A << endl; cout << "[declaration]" << endl; cout << "[matrixLU]" << endl; cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << endl; cout << "[matrixLU]" << endl; cout << "[solve]" << endl; MatrixXd A0(2,2); A0 << 2, -1, 1, 3; VectorXd b(2); b << 1, 2; VectorXd x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[solve]" << endl; cout << "[modifyA]" << endl; A << 3, 4, -2, 1; x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[modifyA]" << endl; cout << "[recompute]" << endl; A0 = A; // save A lu.compute(A); x = lu.solve(b); cout << "Residual: " << (A0 * x - b).norm() << endl; cout << "[recompute]" << endl; cout << "[recompute_bis0]" << endl; MatrixXd A1(2,2); A1 << 5,-2,3,4; lu.compute(A1); cout << "Here is the input matrix A1 after decomposition:\n" << A1 << endl; cout << "[recompute_bis0]" << endl; cout << "[recompute_bis1]" << endl; x = lu.solve(b); cout << "Residual: " << (A1 * x - b).norm() << endl; cout << "[recompute_bis1]" << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgComputeTwice.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix2f A, b; LLT llt; A << 2, -1, -1, 3; b << 1, 2, 3, 1; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the right hand side b:\n" << b << endl; cout << "Computing LLT decomposition..." << endl; llt.compute(A); cout << "The solution is:\n" << llt.solve(b) << endl; A(1,1)++; cout << "The matrix A is now:\n" << A << endl; cout << "Computing LLT decomposition..." 
<< endl; llt.compute(A); cout << "The solution is now:\n" << llt.solve(b) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgExComputeSolveError.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { MatrixXd A = MatrixXd::Random(100,100); MatrixXd b = MatrixXd::Random(100,50); MatrixXd x = A.fullPivLu().solve(b); double relative_error = (A*x - b).norm() / b.norm(); // norm() is L2 norm cout << "The relative error is:\n" << relative_error << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix3f A; Vector3f b; A << 1,2,3, 4,5,6, 7,8,10; b << 3, 3, 4; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the vector b:\n" << b << endl; Vector3f x = A.colPivHouseholderQr().solve(b); cout << "The solution is:\n" << x << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgExSolveLDLT.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix2f A, b; A << 2, -1, -1, 3; b << 1, 2, 3, 1; cout << "Here is the matrix A:\n" << A << endl; cout << "Here is the right hand side b:\n" << b << endl; Matrix2f x = A.ldlt().solve(b); cout << "The solution is:\n" << x << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgInverseDeterminant.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix3f A; A << 1, 2, 1, 2, 1, 0, -1, 1, 2; cout << "Here 
is the matrix A:\n" << A << endl; cout << "The determinant of A is " << A.determinant() << endl; cout << "The inverse of A is:\n" << A.inverse() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgRankRevealing.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix3f A; A << 1, 2, 5, 2, 1, 4, 3, 0, 3; cout << "Here is the matrix A:\n" << A << endl; FullPivLU lu_decomp(A); cout << "The rank of A is " << lu_decomp.rank() << endl; cout << "Here is a matrix whose columns form a basis of the null-space of A:\n" << lu_decomp.kernel() << endl; cout << "Here is a matrix whose columns form a basis of the column-space of A:\n" << lu_decomp.image(A) << endl; // yes, have to pass the original A } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgSVDSolve.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { MatrixXf A = MatrixXf::Random(3, 2); cout << "Here is the matrix A:\n" << A << endl; VectorXf b = VectorXf::Random(3); cout << "Here is the right hand side b:\n" << b << endl; cout << "The least-squares solution is:\n" << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix2f A; A << 1, 2, 2, 3; cout << "Here is the matrix A:\n" << A << endl; SelfAdjointEigenSolver eigensolver(A); if (eigensolver.info() != Success) abort(); cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << endl; cout << "Here's a matrix whose columns are eigenvectors of A \n" << 
"corresponding to these eigenvalues:\n" << eigensolver.eigenvectors() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/TutorialLinAlgSetThreshold.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Matrix2d A; A << 2, 1, 2, 0.9999999999; FullPivLU lu(A); cout << "By default, the rank of A is found to be " << lu.rank() << endl; lu.setThreshold(1e-5); cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_accessors.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { ArrayXXf m(2,2); // assign some values coefficient by coefficient m(0,0) = 1.0; m(0,1) = 2.0; m(1,0) = 3.0; m(1,1) = m(0,1) + m(1,0); // print values to standard output cout << m << endl << endl; // using the comma-initializer is also allowed m << 1.0,2.0, 3.0,4.0; // print values to standard output cout << m << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_addition.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { ArrayXXf a(3,3); ArrayXXf b(3,3); a << 1,2,3, 4,5,6, 7,8,9; b << 1,2,3, 1,2,3, 1,2,3; // Adding two arrays cout << "a + b = " << endl << a + b << endl << endl; // Subtracting a scalar from an array cout << "a - 2 = " << endl << a - 2 << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_cwise_other.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { ArrayXf a = 
ArrayXf::Random(5); a *= 2; cout << "a =" << endl << a << endl; cout << "a.abs() =" << endl << a.abs() << endl; cout << "a.abs().sqrt() =" << endl << a.abs().sqrt() << endl; cout << "a.min(a.abs().sqrt()) =" << endl << a.min(a.abs().sqrt()) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_interop.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); MatrixXf n(2,2); MatrixXf result(2,2); m << 1,2, 3,4; n << 5,6, 7,8; result = (m.array() + 4).matrix() * m; cout << "-- Combination 1: --" << endl << result << endl << endl; result = (m.array() * n.array()).matrix() * m; cout << "-- Combination 2: --" << endl << result << endl << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); MatrixXf n(2,2); MatrixXf result(2,2); m << 1,2, 3,4; n << 5,6, 7,8; result = m * n; cout << "-- Matrix m*n: --" << endl << result << endl << endl; result = m.array() * n.array(); cout << "-- Array m*n: --" << endl << result << endl << endl; result = m.cwiseProduct(n); cout << "-- With cwiseProduct: --" << endl << result << endl << endl; result = m.array() + 4; cout << "-- Array m + 4: --" << endl << result << endl << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ArrayClass_mult.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { ArrayXXf a(2,2); ArrayXXf b(2,2); a << 1,2, 3,4; b << 5,6, 7,8; cout << "a * b = " << endl << a * b << endl; } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_BlockOperations_block_assignment.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Array22f m; m << 1,2, 3,4; Array44f a = Array44f::Constant(0.6); cout << "Here is the array a:" << endl << a << endl << endl; a.block<2,2>(1,1) = m; cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl; a.block(0,0,2,3) = a.block(2,1,2,3); cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:" << endl << a << endl << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_BlockOperations_colrow.cpp ================================================ #include #include using namespace std; int main() { Eigen::MatrixXf m(3,3); m << 1,2,3, 4,5,6, 7,8,9; cout << "Here is the matrix m:" << endl << m << endl; cout << "2nd Row: " << m.row(1) << endl; m.col(2) += 3 * m.col(0); cout << "After adding 3 times the first column into the third column, the matrix m is:\n"; cout << m << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_BlockOperations_corner.cpp ================================================ #include #include using namespace std; int main() { Eigen::Matrix4f m; m << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12, 13,14,15,16; cout << "m.leftCols(2) =" << endl << m.leftCols(2) << endl << endl; cout << "m.bottomRows<2>() =" << endl << m.bottomRows<2>() << endl << endl; m.topLeftCorner(1,3) = m.bottomRightCorner(3,1).transpose(); cout << "After assignment, m = " << endl << m << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_BlockOperations_print_block.cpp ================================================ #include #include using namespace 
std; int main() { Eigen::MatrixXf m(4,4); m << 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12, 13,14,15,16; cout << "Block in the middle" << endl; cout << m.block<2,2>(1,1) << endl << endl; for (int i = 1; i <= 3; ++i) { cout << "Block of size " << i << "x" << i << endl; cout << m.block(0,0,i,i) << endl << endl; } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_BlockOperations_vector.cpp ================================================ #include #include using namespace std; int main() { Eigen::ArrayXf v(6); v << 1, 2, 3, 4, 5, 6; cout << "v.head(3) =" << endl << v.head(3) << endl << endl; cout << "v.tail<3>() = " << endl << v.tail<3>() << endl << endl; v.segment(1,4) *= 2; cout << "after 'v.segment(1,4) *= 2', v =" << endl << v << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_PartialLU_solve.cpp ================================================ #include #include #include using namespace std; using namespace Eigen; int main() { Matrix3f A; Vector3f b; A << 1,2,3, 4,5,6, 7,8,10; b << 3, 3, 4; cout << "Here is the matrix A:" << endl << A << endl; cout << "Here is the vector b:" << endl << b << endl; Vector3f x = A.lu().solve(b); cout << "The solution is:" << endl << x << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Eigen::MatrixXf m(2,4); Eigen::VectorXf v(2); m << 1, 23, 6, 9, 3, 11, 7, 2; v << 2, 3; MatrixXf::Index index; // find nearest neighbour (m.colwise() - v).colwise().squaredNorm().minCoeff(&index); cout << "Nearest neighbour is column " << index << ":" << endl; cout << m.col(index) << endl; } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp ================================================ #include #include using namespace std; int main() { Eigen::MatrixXf mat(2,4); Eigen::VectorXf v(2); mat << 1, 2, 6, 9, 3, 1, 7, 2; v << 0, 1; //add v to each column of m mat.colwise() += v; std::cout << "Broadcasting result: " << std::endl; std::cout << mat << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp ================================================ #include #include using namespace std; int main() { Eigen::MatrixXf mat(2,4); Eigen::VectorXf v(4); mat << 1, 2, 6, 9, 3, 1, 7, 2; v << 0,1,2,3; //add v to each row of m mat.rowwise() += v.transpose(); std::cout << "Broadcasting result: " << std::endl; std::cout << mat << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp ================================================ #include #include using namespace std; int main() { Eigen::MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; std::cout << "Column's maximum: " << std::endl << mat.colwise().maxCoeff() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; MatrixXf::Index maxIndex; float maxNorm = mat.colwise().sum().maxCoeff(&maxIndex); std::cout << "Maximum sum at position " << maxIndex << std::endl; std::cout << "The corresponding vector is: " << std::endl; std::cout << mat.col( maxIndex ) << std::endl; std::cout << 
"And its sum is is: " << maxNorm << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { ArrayXXf a(2,2); a << 1,2, 3,4; cout << "(a > 0).all() = " << (a > 0).all() << endl; cout << "(a > 0).any() = " << (a > 0).any() << endl; cout << "(a > 0).count() = " << (a > 0).count() << endl; cout << endl; cout << "(a > 2).all() = " << (a > 2).all() << endl; cout << "(a > 2).any() = " << (a > 2).any() << endl; cout << "(a > 2).count() = " << (a > 2).count() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { VectorXf v(2); MatrixXf m(2,2), n(2,2); v << -1, 2; m << 1,-2, -3,4; cout << "v.squaredNorm() = " << v.squaredNorm() << endl; cout << "v.norm() = " << v.norm() << endl; cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << endl; cout << "v.lpNorm() = " << v.lpNorm() << endl; cout << endl; cout << "m.squaredNorm() = " << m.squaredNorm() << endl; cout << "m.norm() = " << m.norm() << endl; cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << endl; cout << "m.lpNorm() = " << m.lpNorm() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { MatrixXf m(2,2); m << 1,-2, -3,4; cout << "1-norm(m) = " << m.cwiseAbs().colwise().sum().maxCoeff() << " == " << m.colwise().lpNorm<1>().maxCoeff() << endl; cout << "infty-norm(m) 
= " << m.cwiseAbs().rowwise().sum().maxCoeff() << " == " << m.rowwise().lpNorm<1>().maxCoeff() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp ================================================ #include #include using namespace std; int main() { Eigen::MatrixXf mat(2,4); mat << 1, 2, 6, 9, 3, 1, 7, 2; std::cout << "Row's maximum: " << std::endl << mat.rowwise().maxCoeff() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp ================================================ #include #include using namespace std; using namespace Eigen; int main() { Eigen::MatrixXf m(2,2); m << 1, 2, 3, 4; //get location of maximum MatrixXf::Index maxRow, maxCol; float max = m.maxCoeff(&maxRow, &maxCol); //get location of minimum MatrixXf::Index minRow, minCol; float min = m.minCoeff(&minRow, &minCol); cout << "Max: " << max << ", at: " << maxRow << "," << maxCol << endl; cout << "Min: " << min << ", at: " << minRow << "," << minCol << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/Tutorial_simple_example_dynamic_size.cpp ================================================ #include #include using namespace Eigen; int main() { for (int size=1; size<=4; ++size) { MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's for (int j=0; j #include using namespace Eigen; int main() { Matrix3f m3; m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9; Matrix4f m4 = Matrix4f::Identity(); Vector4i v4(1, 2, 3, 4); std::cout << "m3\n" << m3 << "\nm4:\n" << m4 << "\nv4:\n" << v4 << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_Block.cpp ================================================ #include 
#include using namespace Eigen; using namespace std; template Eigen::Block topLeftCorner(MatrixBase& m, int rows, int cols) { return Eigen::Block(m.derived(), 0, 0, rows, cols); } template const Eigen::Block topLeftCorner(const MatrixBase& m, int rows, int cols) { return Eigen::Block(m.derived(), 0, 0, rows, cols); } int main(int, char**) { Matrix4d m = Matrix4d::Identity(); cout << topLeftCorner(4*m, 2, 3) << endl; // calls the const version topLeftCorner(m, 2, 3) *= 5; // calls the non-const version cout << "Now the matrix m is:" << endl << m << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_CwiseBinaryOp.cpp ================================================ #include #include using namespace Eigen; using namespace std; // define a custom template binary functor template struct MakeComplexOp { EIGEN_EMPTY_STRUCT_CTOR(MakeComplexOp) typedef complex result_type; complex operator()(const Scalar& a, const Scalar& b) const { return complex(a,b); } }; int main(int, char**) { Matrix4d m1 = Matrix4d::Random(), m2 = Matrix4d::Random(); cout << m1.binaryExpr(m2, MakeComplexOp()) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_CwiseUnaryOp.cpp ================================================ #include #include using namespace Eigen; using namespace std; // define a custom template unary functor template struct CwiseClampOp { CwiseClampOp(const Scalar& inf, const Scalar& sup) : m_inf(inf), m_sup(sup) {} const Scalar operator()(const Scalar& x) const { return xm_sup ? 
m_sup : x); } Scalar m_inf, m_sup; }; int main(int, char**) { Matrix4d m1 = Matrix4d::Random(); cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(CwiseClampOp(-0.5,0.5)) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_CwiseUnaryOp_ptrfun.cpp ================================================ #include #include using namespace Eigen; using namespace std; // define function to be applied coefficient-wise double ramp(double x) { if (x > 0) return x; else return 0; } int main(int, char**) { Matrix4d m1 = Matrix4d::Random(); cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(ptr_fun(ramp)) << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_FixedBlock.cpp ================================================ #include #include using namespace Eigen; using namespace std; template Eigen::Block topLeft2x2Corner(MatrixBase& m) { return Eigen::Block(m.derived(), 0, 0); } template const Eigen::Block topLeft2x2Corner(const MatrixBase& m) { return Eigen::Block(m.derived(), 0, 0); } int main(int, char**) { Matrix3d m = Matrix3d::Identity(); cout << topLeft2x2Corner(4*m) << endl; // calls the const version topLeft2x2Corner(m) *= 2; // calls the non-const version cout << "Now the matrix m is:" << endl << m << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_FixedReshaped.cpp ================================================ #include #include using namespace Eigen; using namespace std; template Eigen::Reshaped reshape_helper(MatrixBase& m) { return Eigen::Reshaped(m.derived()); } int main(int, char**) { MatrixXd m(2, 4); m << 1, 2, 3, 4, 5, 6, 7, 8; MatrixXd n = reshape_helper(m); cout << "matrix m is:" << endl << m << endl; cout << "matrix n is:" << endl << n << endl; return 0; } 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_FixedVectorBlock.cpp ================================================ #include #include using namespace Eigen; using namespace std; template Eigen::VectorBlock firstTwo(MatrixBase& v) { return Eigen::VectorBlock(v.derived(), 0); } template const Eigen::VectorBlock firstTwo(const MatrixBase& v) { return Eigen::VectorBlock(v.derived(), 0); } int main(int, char**) { Matrix v; v << 1,2,3,4,5,6; cout << firstTwo(4*v) << endl; // calls the const version firstTwo(v) *= 2; // calls the non-const version cout << "Now the vector v is:" << endl << v << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_Reshaped.cpp ================================================ #include #include using namespace std; using namespace Eigen; template const Reshaped reshape_helper(const MatrixBase& m, int rows, int cols) { return Reshaped(m.derived(), rows, cols); } int main(int, char**) { MatrixXd m(3, 4); m << 1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12; cout << m << endl; Ref n = reshape_helper(m, 2, 6); cout << "Matrix m is:" << endl << m << endl; cout << "Matrix n is:" << endl << n << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/class_VectorBlock.cpp ================================================ #include #include using namespace Eigen; using namespace std; template Eigen::VectorBlock segmentFromRange(MatrixBase& v, int start, int end) { return Eigen::VectorBlock(v.derived(), start, end-start); } template const Eigen::VectorBlock segmentFromRange(const MatrixBase& v, int start, int end) { return Eigen::VectorBlock(v.derived(), start, end-start); } int main(int, char**) { Matrix v; v << 1,2,3,4,5,6; cout << segmentFromRange(2*v, 2, 4) << endl; // calls the const version segmentFromRange(v, 1, 3) *= 5; // 
calls the non-const version cout << "Now the vector v is:" << endl << v << endl; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/function_taking_eigenbase.cpp ================================================ #include #include using namespace Eigen; template void print_size(const EigenBase& b) { std::cout << "size (rows, cols): " << b.size() << " (" << b.rows() << ", " << b.cols() << ")" << std::endl; } int main() { Vector3f v; print_size(v); // v.asDiagonal() returns a 3x3 diagonal matrix pseudo-expression print_size(v.asDiagonal()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/function_taking_ref.cpp ================================================ #include #include using namespace Eigen; using namespace std; float inv_cond(const Ref& a) { const VectorXf sing_vals = a.jacobiSvd().singularValues(); return sing_vals(sing_vals.size()-1) / sing_vals(0); } int main() { Matrix4f m = Matrix4f::Random(); cout << "matrix m:" << endl << m << endl << endl; cout << "inv_cond(m): " << inv_cond(m) << endl; cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3)) << endl; cout << "inv_cond(m+I): " << inv_cond(m+Matrix4f::Identity()) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp ================================================ /* This program is presented in several fragments in the doc page. Every fragment is in its own file; this file simply combines them. 
*/ #include "make_circulant.cpp.preamble" #include "make_circulant.cpp.traits" #include "make_circulant.cpp.expression" #include "make_circulant.cpp.evaluator" #include "make_circulant.cpp.entry" #include "make_circulant.cpp.main" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.entry ================================================ template Circulant makeCirculant(const Eigen::MatrixBase& arg) { return Circulant(arg.derived()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.evaluator ================================================ namespace Eigen { namespace internal { template struct evaluator > : evaluator_base > { typedef Circulant XprType; typedef typename nested_eval::type ArgTypeNested; typedef typename remove_all::type ArgTypeNestedCleaned; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = Eigen::ColMajor }; evaluator(const XprType& xpr) : m_argImpl(xpr.m_arg), m_rows(xpr.rows()) { } CoeffReturnType coeff(Index row, Index col) const { Index index = row - col; if (index < 0) index += m_rows; return m_argImpl.coeff(index); } evaluator m_argImpl; const Index m_rows; }; } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.expression ================================================ template class Circulant : public Eigen::MatrixBase > { public: Circulant(const ArgType& arg) : m_arg(arg) { EIGEN_STATIC_ASSERT(ArgType::ColsAtCompileTime == 1, YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX); } typedef typename Eigen::internal::ref_selector::type Nested; typedef Eigen::Index Index; Index rows() const { return m_arg.rows(); } Index cols() const { return m_arg.rows(); } typedef typename Eigen::internal::ref_selector::type ArgTypeNested; 
ArgTypeNested m_arg; }; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.main ================================================ int main() { Eigen::VectorXd vec(4); vec << 1, 2, 4, 8; Eigen::MatrixXd mat; mat = makeCirculant(vec); std::cout << mat << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.preamble ================================================ #include #include template class Circulant; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant.cpp.traits ================================================ namespace Eigen { namespace internal { template struct traits > { typedef Eigen::Dense StorageKind; typedef Eigen::MatrixXpr XprKind; typedef typename ArgType::StorageIndex StorageIndex; typedef typename ArgType::Scalar Scalar; enum { Flags = Eigen::ColMajor, RowsAtCompileTime = ArgType::RowsAtCompileTime, ColsAtCompileTime = ArgType::RowsAtCompileTime, MaxRowsAtCompileTime = ArgType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ArgType::MaxRowsAtCompileTime }; }; } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/make_circulant2.cpp ================================================ #include #include using namespace Eigen; // [circulant_func] template class circulant_functor { const ArgType &m_vec; public: circulant_functor(const ArgType& arg) : m_vec(arg) {} const typename ArgType::Scalar& operator() (Index row, Index col) const { Index index = row - col; if (index < 0) index += m_vec.size(); return m_vec(index); } }; // [circulant_func] // [square] template struct circulant_helper { typedef Matrix MatrixType; }; // [square] // [makeCirculant] template CwiseNullaryOp, typename circulant_helper::MatrixType> makeCirculant(const 
Eigen::MatrixBase& arg) { typedef typename circulant_helper::MatrixType MatrixType; return MatrixType::NullaryExpr(arg.size(), arg.size(), circulant_functor(arg.derived())); } // [makeCirculant] // [main] int main() { Eigen::VectorXd vec(4); vec << 1, 2, 4, 8; Eigen::MatrixXd mat; mat = makeCirculant(vec); std::cout << mat << std::endl; } // [main] ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/matrixfree_cg.cpp ================================================ #include #include #include #include #include class MatrixReplacement; using Eigen::SparseMatrix; namespace Eigen { namespace internal { // MatrixReplacement looks-like a SparseMatrix, so let's inherits its traits: template<> struct traits : public Eigen::internal::traits > {}; } } // Example of a matrix-free wrapper from a user type to Eigen's compatible type // For the sake of simplicity, this example simply wrap a Eigen::SparseMatrix. class MatrixReplacement : public Eigen::EigenBase { public: // Required typedefs, constants, and method: typedef double Scalar; typedef double RealScalar; typedef int StorageIndex; enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic, IsRowMajor = false }; Index rows() const { return mp_mat->rows(); } Index cols() const { return mp_mat->cols(); } template Eigen::Product operator*(const Eigen::MatrixBase& x) const { return Eigen::Product(*this, x.derived()); } // Custom API: MatrixReplacement() : mp_mat(0) {} void attachMyMatrix(const SparseMatrix &mat) { mp_mat = &mat; } const SparseMatrix my_matrix() const { return *mp_mat; } private: const SparseMatrix *mp_mat; }; // Implementation of MatrixReplacement * Eigen::DenseVector though a specialization of internal::generic_product_impl: namespace Eigen { namespace internal { template struct generic_product_impl // GEMV stands for matrix-vector : generic_product_impl_base > { typedef typename Product::Scalar Scalar; template static 
void scaleAndAddTo(Dest& dst, const MatrixReplacement& lhs, const Rhs& rhs, const Scalar& alpha) { // This method should implement "dst += alpha * lhs * rhs" inplace, // however, for iterative solvers, alpha is always equal to 1, so let's not bother about it. assert(alpha==Scalar(1) && "scaling is not implemented"); EIGEN_ONLY_USED_FOR_DEBUG(alpha); // Here we could simply call dst.noalias() += lhs.my_matrix() * rhs, // but let's do something fancier (and less efficient): for(Index i=0; i S = Eigen::MatrixXd::Random(n,n).sparseView(0.5,1); S = S.transpose()*S; MatrixReplacement A; A.attachMyMatrix(S); Eigen::VectorXd b(n), x; b.setRandom(); // Solve Ax = b using various iterative solver with matrix-free version: { Eigen::ConjugateGradient cg; cg.compute(A); x = cg.solve(b); std::cout << "CG: #iterations: " << cg.iterations() << ", estimated error: " << cg.error() << std::endl; } { Eigen::BiCGSTAB bicg; bicg.compute(A); x = bicg.solve(b); std::cout << "BiCGSTAB: #iterations: " << bicg.iterations() << ", estimated error: " << bicg.error() << std::endl; } { Eigen::GMRES gmres; gmres.compute(A); x = gmres.solve(b); std::cout << "GMRES: #iterations: " << gmres.iterations() << ", estimated error: " << gmres.error() << std::endl; } { Eigen::DGMRES gmres; gmres.compute(A); x = gmres.solve(b); std::cout << "DGMRES: #iterations: " << gmres.iterations() << ", estimated error: " << gmres.error() << std::endl; } { Eigen::MINRES minres; minres.compute(A); x = minres.solve(b); std::cout << "MINRES: #iterations: " << minres.iterations() << ", estimated error: " << minres.error() << std::endl; } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/nullary_indexing.cpp ================================================ #include #include using namespace Eigen; // [functor] template class indexing_functor { const ArgType &m_arg; const RowIndexType &m_rowIndices; const ColIndexType &m_colIndices; public: typedef Matrix 
MatrixType; indexing_functor(const ArgType& arg, const RowIndexType& row_indices, const ColIndexType& col_indices) : m_arg(arg), m_rowIndices(row_indices), m_colIndices(col_indices) {} const typename ArgType::Scalar& operator() (Index row, Index col) const { return m_arg(m_rowIndices[row], m_colIndices[col]); } }; // [functor] // [function] template CwiseNullaryOp, typename indexing_functor::MatrixType> mat_indexing(const Eigen::MatrixBase& arg, const RowIndexType& row_indices, const ColIndexType& col_indices) { typedef indexing_functor Func; typedef typename Func::MatrixType MatrixType; return MatrixType::NullaryExpr(row_indices.size(), col_indices.size(), Func(arg.derived(), row_indices, col_indices)); } // [function] int main() { std::cout << "[main1]\n"; Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4); Array3i ri(1,2,1); ArrayXi ci(6); ci << 3,2,1,0,0,2; Eigen::MatrixXi B = mat_indexing(A, ri, ci); std::cout << "A =" << std::endl; std::cout << A << std::endl << std::endl; std::cout << "A([" << ri.transpose() << "], [" << ci.transpose() << "]) =" << std::endl; std::cout << B << std::endl; std::cout << "[main1]\n"; std::cout << "[main2]\n"; B = mat_indexing(A, ri+1, ci); std::cout << "A(ri+1,ci) =" << std::endl; std::cout << B << std::endl << std::endl; #if EIGEN_COMP_CXXVER >= 11 B = mat_indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)); std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl; std::cout << B << std::endl << std::endl; #endif std::cout << "[main2]\n"; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_arithmetic_add_sub.cpp ================================================ #include #include using namespace Eigen; int main() { Matrix2d a; a << 1, 2, 3, 4; MatrixXd b(2,2); b << 2, 3, 1, 4; std::cout << "a + b =\n" << a + b << std::endl; std::cout << "a - b =\n" 
<< a - b << std::endl; std::cout << "Doing a += b;" << std::endl; a += b; std::cout << "Now a =\n" << a << std::endl; Vector3d v(1,2,3); Vector3d w(1,0,0); std::cout << "-v + w - v =\n" << -v + w - v << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_arithmetic_dot_cross.cpp ================================================ #include #include using namespace Eigen; using namespace std; int main() { Vector3d v(1,2,3); Vector3d w(0,1,2); cout << "Dot product: " << v.dot(w) << endl; double dp = v.adjoint()*w; // automatic conversion of the inner product to a scalar cout << "Dot product via a matrix product: " << dp << endl; cout << "Cross product:\n" << v.cross(w) << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_arithmetic_matrix_mul.cpp ================================================ #include #include using namespace Eigen; int main() { Matrix2d mat; mat << 1, 2, 3, 4; Vector2d u(-1,1), v(2,0); std::cout << "Here is mat*mat:\n" << mat*mat << std::endl; std::cout << "Here is mat*u:\n" << mat*u << std::endl; std::cout << "Here is u^T*mat:\n" << u.transpose()*mat << std::endl; std::cout << "Here is u^T*v:\n" << u.transpose()*v << std::endl; std::cout << "Here is u*v^T:\n" << u*v.transpose() << std::endl; std::cout << "Let's multiply mat by itself" << std::endl; mat = mat*mat; std::cout << "Now mat is mat:\n" << mat << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_arithmetic_redux_basic.cpp ================================================ #include #include using namespace std; int main() { Eigen::Matrix2d mat; mat << 1, 2, 3, 4; cout << "Here is mat.sum(): " << mat.sum() << endl; cout << "Here is mat.prod(): " << mat.prod() << endl; cout << "Here is mat.mean(): " << mat.mean() << endl; cout << "Here is mat.minCoeff(): " 
<< mat.minCoeff() << endl; cout << "Here is mat.maxCoeff(): " << mat.maxCoeff() << endl; cout << "Here is mat.trace(): " << mat.trace() << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_arithmetic_scalar_mul_div.cpp ================================================ #include #include using namespace Eigen; int main() { Matrix2d a; a << 1, 2, 3, 4; Vector3d v(1,2,3); std::cout << "a * 2.5 =\n" << a * 2.5 << std::endl; std::cout << "0.1 * v =\n" << 0.1 * v << std::endl; std::cout << "Doing v *= 2;" << std::endl; v *= 2; std::cout << "Now v =\n" << v << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_matrix_coefficient_accessors.cpp ================================================ #include #include using namespace Eigen; int main() { MatrixXd m(2,2); m(0,0) = 3; m(1,0) = 2.5; m(0,1) = -1; m(1,1) = m(1,0) + m(0,1); std::cout << "Here is the matrix m:\n" << m << std::endl; VectorXd v(2); v(0) = 4; v(1) = v(0) - 1; std::cout << "Here is the vector v:\n" << v << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_matrix_resize.cpp ================================================ #include #include using namespace Eigen; int main() { MatrixXd m(2,5); m.resize(4,3); std::cout << "The matrix m is of size " << m.rows() << "x" << m.cols() << std::endl; std::cout << "It has " << m.size() << " coefficients" << std::endl; VectorXd v(2); v.resize(5); std::cout << "The vector v is of size " << v.size() << std::endl; std::cout << "As a matrix, v is of size " << v.rows() << "x" << v.cols() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/examples/tut_matrix_resize_fixed_size.cpp ================================================ #include #include using namespace Eigen; 
int main() { Matrix4d m; m.resize(4,4); // no operation std::cout << "The matrix m is of size " << m.rows() << "x" << m.cols() << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/.krazy ================================================ EXCLUDE copyright EXCLUDE license ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/AngleAxis_mimic_euler.cpp ================================================ Matrix3f m; m = AngleAxisf(0.25*M_PI, Vector3f::UnitX()) * AngleAxisf(0.5*M_PI, Vector3f::UnitY()) * AngleAxisf(0.33*M_PI, Vector3f::UnitZ()); cout << m << endl << "is unitary: " << m.isUnitary() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Array_initializer_list_23_cxx11.cpp ================================================ ArrayXXi a { {1, 2, 3}, {3, 4, 5} }; cout << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Array_initializer_list_vector_cxx11.cpp ================================================ Array v {{1, 2, 3, 4, 5}}; cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Array_variadic_ctor_cxx11.cpp ================================================ Array a(1, 2, 3, 4, 5, 6); Array b {1, 2, 3}; cout << a << "\n\n" << b << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/BiCGSTAB_simple.cpp ================================================ int n = 10000; VectorXd x(n), b(n); SparseMatrix A(n,n); /* ... fill A and b ... */ BiCGSTAB > solver; solver.compute(A); x = solver.solve(b); std::cout << "#iterations: " << solver.iterations() << std::endl; std::cout << "estimated error: " << solver.error() << std::endl; /* ... 
update b ... */ x = solver.solve(b); // solve again ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/BiCGSTAB_step_by_step.cpp ================================================ int n = 10000; VectorXd x(n), b(n); SparseMatrix A(n,n); /* ... fill A and b ... */ BiCGSTAB > solver(A); // start from a random solution x = VectorXd::Random(n); solver.setMaxIterations(1); int i = 0; do { x = solver.solveWithGuess(b,x); std::cout << i << " : " << solver.error() << std::endl; ++i; } while (solver.info()!=Success && i<100); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ColPivHouseholderQR_solve.cpp ================================================ Matrix3f m = Matrix3f::Random(); Matrix3f y = Matrix3f::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the matrix y:" << endl << y << endl; Matrix3f x; x = m.colPivHouseholderQr().solve(y); assert(y.isApprox(m*x)); cout << "Here is a solution x to the equation mx=y:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexEigenSolver_compute.cpp ================================================ MatrixXcf A = MatrixXcf::Random(4,4); cout << "Here is a random 4x4 matrix, A:" << endl << A << endl << endl; ComplexEigenSolver ces; ces.compute(A); cout << "The eigenvalues of A are:" << endl << ces.eigenvalues() << endl; cout << "The matrix of eigenvectors, V, is:" << endl << ces.eigenvectors() << endl << endl; complex lambda = ces.eigenvalues()[0]; cout << "Consider the first eigenvalue, lambda = " << lambda << endl; VectorXcf v = ces.eigenvectors().col(0); cout << "If v is the corresponding eigenvector, then lambda * v = " << endl << lambda * v << endl; cout << "... 
and A * v = " << endl << A * v << endl << endl; cout << "Finally, V * D * V^(-1) = " << endl << ces.eigenvectors() * ces.eigenvalues().asDiagonal() * ces.eigenvectors().inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexEigenSolver_eigenvalues.cpp ================================================ MatrixXcf ones = MatrixXcf::Ones(3,3); ComplexEigenSolver ces(ones, /* computeEigenvectors = */ false); cout << "The eigenvalues of the 3x3 matrix of ones are:" << endl << ces.eigenvalues() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexEigenSolver_eigenvectors.cpp ================================================ MatrixXcf ones = MatrixXcf::Ones(3,3); ComplexEigenSolver ces(ones); cout << "The first eigenvector of the 3x3 matrix of ones is:" << endl << ces.eigenvectors().col(0) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexSchur_compute.cpp ================================================ MatrixXcf A = MatrixXcf::Random(4,4); ComplexSchur schur(4); schur.compute(A); cout << "The matrix T in the decomposition of A is:" << endl << schur.matrixT() << endl; schur.compute(A.inverse()); cout << "The matrix T in the decomposition of A^(-1) is:" << endl << schur.matrixT() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexSchur_matrixT.cpp ================================================ MatrixXcf A = MatrixXcf::Random(4,4); cout << "Here is a random 4x4 matrix, A:" << endl << A << endl << endl; ComplexSchur schurOfA(A, false); // false means do not compute U cout << "The triangular matrix T is:" << endl << schurOfA.matrixT() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/ComplexSchur_matrixU.cpp ================================================ MatrixXcf A = MatrixXcf::Random(4,4); cout << "Here is a random 4x4 matrix, A:" << endl << A << endl << endl; ComplexSchur schurOfA(A); cout << "The unitary matrix U is:" << endl << schurOfA.matrixU() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_abs.cpp ================================================ Array3d v(1,-2,-3); cout << v.abs() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_abs2.cpp ================================================ Array3d v(1,-2,-3); cout << v.abs2() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_acos.cpp ================================================ Array3d v(0, sqrt(2.)/2, 1); cout << v.acos() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_arg.cpp ================================================ ArrayXcf v = ArrayXcf::Random(3); cout << v << endl << endl; cout << arg(v) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_array_power_array.cpp ================================================ Array x(8,25,3), e(1./3.,0.5,2.); cout << "[" << x << "]^[" << e << "] = " << x.pow(e) << endl; // using ArrayBase::pow cout << "[" << x << "]^[" << e << "] = " << pow(x,e) << endl; // using Eigen::pow ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_asin.cpp ================================================ Array3d v(0, sqrt(2.)/2, 1); cout << v.asin() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_atan.cpp ================================================ ArrayXd v = ArrayXd::LinSpaced(5,0,1); cout << v.atan() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_boolean_and.cpp ================================================ Array3d v(-1,2,1), w(-3,2,3); cout << ((vw) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_greater_equal.cpp ================================================ Array3d v(1,2,3), w(3,2,1); cout << (v>=w) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_inverse.cpp ================================================ Array3d v(2,3,4); cout << v.inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_isFinite.cpp ================================================ Array3d v(1,2,3); v(1) *= 0.0/0.0; v(2) /= 0.0; cout << v << endl << endl; cout << isfinite(v) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_isInf.cpp ================================================ Array3d v(1,2,3); v(1) *= 0.0/0.0; v(2) /= 0.0; cout << v << endl << endl; cout << isinf(v) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_isNaN.cpp ================================================ Array3d v(1,2,3); v(1) *= 0.0/0.0; v(2) /= 0.0; cout << v << endl << endl; cout << isnan(v) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_less.cpp ================================================ Array3d v(1,2,3), w(3,2,1); cout << (v e(2,-3,1./3.); cout 
<< "10^[" << e << "] = " << pow(10,e) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_sign.cpp ================================================ Array3d v(-3,5,0); cout << v.sign() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_sin.cpp ================================================ Array3d v(M_PI, M_PI/2, M_PI/3); cout << v.sin() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_sinh.cpp ================================================ ArrayXd v = ArrayXd::LinSpaced(5,0,1); cout << sinh(v) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_slash_equal.cpp ================================================ Array3d v(3,2,4), w(5,4,2); v /= w; cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_sqrt.cpp ================================================ Array3d v(1,2,4); cout << v.sqrt() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_square.cpp ================================================ Array3d v(2,3,4); cout << v.square() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_tan.cpp ================================================ Array3d v(M_PI, M_PI/2, M_PI/3); cout << v.tan() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_tanh.cpp ================================================ ArrayXd v = ArrayXd::LinSpaced(5,0,1); cout << tanh(v) << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Cwise_times_equal.cpp ================================================ Array3d v(1,2,3), w(2,3,0); v *= w; cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DenseBase_LinSpaced.cpp ================================================ cout << VectorXi::LinSpaced(4,7,10).transpose() << endl; cout << VectorXd::LinSpaced(5,0.0,1.0).transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DenseBase_LinSpacedInt.cpp ================================================ cout << "Even spacing inputs:" << endl; cout << VectorXi::LinSpaced(8,1,4).transpose() << endl; cout << VectorXi::LinSpaced(8,1,8).transpose() << endl; cout << VectorXi::LinSpaced(8,1,15).transpose() << endl; cout << "Uneven spacing inputs:" << endl; cout << VectorXi::LinSpaced(8,1,7).transpose() << endl; cout << VectorXi::LinSpaced(8,1,9).transpose() << endl; cout << VectorXi::LinSpaced(8,1,16).transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DenseBase_LinSpaced_seq_deprecated.cpp ================================================ cout << VectorXi::LinSpaced(Sequential,4,7,10).transpose() << endl; cout << VectorXd::LinSpaced(Sequential,5,0.0,1.0).transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DenseBase_setLinSpaced.cpp ================================================ VectorXf v; v.setLinSpaced(5,0.5f,1.5f); cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DirectionWise_hnormalized.cpp ================================================ Matrix4Xd M = Matrix4Xd::Random(4,5); Projective3d P(Matrix4d::Random()); cout << "The matrix M 
is:" << endl << M << endl << endl; cout << "M.colwise().hnormalized():" << endl << M.colwise().hnormalized() << endl << endl; cout << "P*M:" << endl << P*M << endl << endl; cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DirectionWise_replicate.cpp ================================================ MatrixXi m = MatrixXi::Random(2,3); cout << "Here is the matrix m:" << endl << m << endl; cout << "m.colwise().replicate<3>() = ..." << endl; cout << m.colwise().replicate<3>() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/DirectionWise_replicate_int.cpp ================================================ Vector3i v = Vector3i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "v.rowwise().replicate(5) = ..." << endl; cout << v.rowwise().replicate(5) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/EigenSolver_EigenSolver_MatrixType.cpp ================================================ MatrixXd A = MatrixXd::Random(6,6); cout << "Here is a random 6x6 matrix, A:" << endl << A << endl << endl; EigenSolver es(A); cout << "The eigenvalues of A are:" << endl << es.eigenvalues() << endl; cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl; complex lambda = es.eigenvalues()[0]; cout << "Consider the first eigenvalue, lambda = " << lambda << endl; VectorXcd v = es.eigenvectors().col(0); cout << "If v is the corresponding eigenvector, then lambda * v = " << endl << lambda * v << endl; cout << "... 
and A * v = " << endl << A.cast >() * v << endl << endl; MatrixXcd D = es.eigenvalues().asDiagonal(); MatrixXcd V = es.eigenvectors(); cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/EigenSolver_compute.cpp ================================================ EigenSolver es; MatrixXf A = MatrixXf::Random(4,4); es.compute(A, /* computeEigenvectors = */ false); cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl; es.compute(A + MatrixXf::Identity(4,4), false); // re-use es to compute eigenvalues of A+I cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/EigenSolver_eigenvalues.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); EigenSolver es(ones, false); cout << "The eigenvalues of the 3x3 matrix of ones are:" << endl << es.eigenvalues() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/EigenSolver_eigenvectors.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); EigenSolver es(ones); cout << "The first eigenvector of the 3x3 matrix of ones is:" << endl << es.eigenvectors().col(0) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/EigenSolver_pseudoEigenvectors.cpp ================================================ MatrixXd A = MatrixXd::Random(6,6); cout << "Here is a random 6x6 matrix, A:" << endl << A << endl << endl; EigenSolver es(A); MatrixXd D = es.pseudoEigenvalueMatrix(); MatrixXd V = es.pseudoEigenvectors(); cout << "The pseudo-eigenvalue matrix D is:" << endl << D << endl; cout << "The pseudo-eigenvector matrix V is:" << endl << 
V << endl; cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/FullPivHouseholderQR_solve.cpp ================================================ Matrix3f m = Matrix3f::Random(); Matrix3f y = Matrix3f::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the matrix y:" << endl << y << endl; Matrix3f x; x = m.fullPivHouseholderQr().solve(y); assert(y.isApprox(m*x)); cout << "Here is a solution x to the equation mx=y:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/FullPivLU_image.cpp ================================================ Matrix3d m; m << 1,1,0, 1,3,2, 0,1,1; cout << "Here is the matrix m:" << endl << m << endl; cout << "Notice that the middle column is the sum of the two others, so the " << "columns are linearly dependent." 
<< endl; cout << "Here is a matrix whose columns have the same span but are linearly independent:" << endl << m.fullPivLu().image(m) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/FullPivLU_kernel.cpp ================================================ MatrixXf m = MatrixXf::Random(3,5); cout << "Here is the matrix m:" << endl << m << endl; MatrixXf ker = m.fullPivLu().kernel(); cout << "Here is a matrix whose columns form a basis of the kernel of m:" << endl << ker << endl; cout << "By definition of the kernel, m*ker is zero:" << endl << m*ker << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/FullPivLU_solve.cpp ================================================ Matrix m = Matrix::Random(); Matrix2f y = Matrix2f::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the matrix y:" << endl << y << endl; Matrix x = m.fullPivLu().solve(y); if((m*x).isApprox(y)) { cout << "Here is a solution x to the equation mx=y:" << endl << x << endl; } else cout << "The equation mx=y does not have any solution." 
<< endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/GeneralizedEigenSolver.cpp ================================================ GeneralizedEigenSolver ges; MatrixXf A = MatrixXf::Random(4,4); MatrixXf B = MatrixXf::Random(4,4); ges.compute(A, B); cout << "The (complex) numerators of the generalzied eigenvalues are: " << ges.alphas().transpose() << endl; cout << "The (real) denominatore of the generalzied eigenvalues are: " << ges.betas().transpose() << endl; cout << "The (complex) generalzied eigenvalues are (alphas./beta): " << ges.eigenvalues().transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HessenbergDecomposition_compute.cpp ================================================ MatrixXcf A = MatrixXcf::Random(4,4); HessenbergDecomposition hd(4); hd.compute(A); cout << "The matrix H in the decomposition of A is:" << endl << hd.matrixH() << endl; hd.compute(2*A); // re-use hd to compute and store decomposition of 2A cout << "The matrix H in the decomposition of 2A is:" << endl << hd.matrixH() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HessenbergDecomposition_matrixH.cpp ================================================ Matrix4f A = MatrixXf::Random(4,4); cout << "Here is a random 4x4 matrix:" << endl << A << endl; HessenbergDecomposition hessOfA(A); MatrixXf H = hessOfA.matrixH(); cout << "The Hessenberg matrix H is:" << endl << H << endl; MatrixXf Q = hessOfA.matrixQ(); cout << "The orthogonal matrix Q is:" << endl << Q << endl; cout << "Q H Q^T is:" << endl << Q * H * Q.transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HessenbergDecomposition_packedMatrix.cpp ================================================ Matrix4d A = 
Matrix4d::Random(4,4); cout << "Here is a random 4x4 matrix:" << endl << A << endl; HessenbergDecomposition hessOfA(A); Matrix4d pm = hessOfA.packedMatrix(); cout << "The packed matrix M is:" << endl << pm << endl; cout << "The upper Hessenberg part corresponds to the matrix H, which is:" << endl << hessOfA.matrixH() << endl; Vector3d hc = hessOfA.householderCoefficients(); cout << "The vector of Householder coefficients is:" << endl << hc << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HouseholderQR_householderQ.cpp ================================================ MatrixXf A(MatrixXf::Random(5,3)), thinQ(MatrixXf::Identity(5,3)), Q; A.setRandom(); HouseholderQR qr(A); Q = qr.householderQ(); thinQ = qr.householderQ() * thinQ; std::cout << "The complete unitary matrix Q is:\n" << Q << "\n\n"; std::cout << "The thin matrix Q is:\n" << thinQ << "\n\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HouseholderQR_solve.cpp ================================================ typedef Matrix Matrix3x3; Matrix3x3 m = Matrix3x3::Random(); Matrix3f y = Matrix3f::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the matrix y:" << endl << y << endl; Matrix3f x; x = m.householderQr().solve(y); assert(y.isApprox(m*x)); cout << "Here is a solution x to the equation mx=y:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/HouseholderSequence_HouseholderSequence.cpp ================================================ Matrix3d v = Matrix3d::Random(); cout << "The matrix v is:" << endl; cout << v << endl; Vector3d v0(1, v(1,0), v(2,0)); cout << "The first Householder vector is: v_0 = " << v0.transpose() << endl; Vector3d v1(0, 1, v(2,1)); cout << "The second Householder vector is: v_1 = " << v1.transpose() << endl; Vector3d 
v2(0, 0, 1); cout << "The third Householder vector is: v_2 = " << v2.transpose() << endl; Vector3d h = Vector3d::Random(); cout << "The Householder coefficients are: h = " << h.transpose() << endl; Matrix3d H0 = Matrix3d::Identity() - h(0) * v0 * v0.adjoint(); cout << "The first Householder reflection is represented by H_0 = " << endl; cout << H0 << endl; Matrix3d H1 = Matrix3d::Identity() - h(1) * v1 * v1.adjoint(); cout << "The second Householder reflection is represented by H_1 = " << endl; cout << H1 << endl; Matrix3d H2 = Matrix3d::Identity() - h(2) * v2 * v2.adjoint(); cout << "The third Householder reflection is represented by H_2 = " << endl; cout << H2 << endl; cout << "Their product is H_0 H_1 H_2 = " << endl; cout << H0 * H1 * H2 << endl; HouseholderSequence hhSeq(v, h); Matrix3d hhSeqAsMatrix(hhSeq); cout << "If we construct a HouseholderSequence from v and h" << endl; cout << "and convert it to a matrix, we get:" << endl; cout << hhSeqAsMatrix << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/IOFormat.cpp ================================================ std::string sep = "\n----------------------------------------\n"; Matrix3d m1; m1 << 1.111111, 2, 3.33333, 4, 5, 6, 7, 8.888888, 9; IOFormat CommaInitFmt(StreamPrecision, DontAlignCols, ", ", ", ", "", "", " << ", ";"); IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]"); IOFormat OctaveFmt(StreamPrecision, 0, ", ", ";\n", "", "", "[", "]"); IOFormat HeavyFmt(FullPrecision, 0, ", ", ";\n", "[", "]", "[", "]"); std::cout << m1 << sep; std::cout << m1.format(CommaInitFmt) << sep; std::cout << m1.format(CleanFmt) << sep; std::cout << m1.format(OctaveFmt) << sep; std::cout << m1.format(HeavyFmt) << sep; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/JacobiSVD_basic.cpp ================================================ MatrixXf m = MatrixXf::Random(3,2); cout << "Here is 
the matrix m:" << endl << m << endl; JacobiSVD svd(m, ComputeThinU | ComputeThinV); cout << "Its singular values are:" << endl << svd.singularValues() << endl; cout << "Its left singular vectors are the columns of the thin U matrix:" << endl << svd.matrixU() << endl; cout << "Its right singular vectors are the columns of the thin V matrix:" << endl << svd.matrixV() << endl; Vector3f rhs(1, 0, 0); cout << "Now consider this rhs vector:" << endl << rhs << endl; cout << "A least-squares solution of m*x = rhs is:" << endl << svd.solve(rhs) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Jacobi_makeGivens.cpp ================================================ Vector2f v = Vector2f::Random(); JacobiRotation G; G.makeGivens(v.x(), v.y()); cout << "Here is the vector v:" << endl << v << endl; v.applyOnTheLeft(0, 1, G.adjoint()); cout << "Here is the vector J' * v:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Jacobi_makeJacobi.cpp ================================================ Matrix2f m = Matrix2f::Random(); m = (m + m.adjoint()).eval(); JacobiRotation J; J.makeJacobi(m, 0, 1); cout << "Here is the matrix m:" << endl << m << endl; m.applyOnTheLeft(0, 1, J.adjoint()); m.applyOnTheRight(0, 1, J); cout << "Here is the matrix J' * m * J:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/LLT_example.cpp ================================================ MatrixXd A(3,3); A << 4,-1,2, -1,6,0, 2,0,5; cout << "The matrix A is" << endl << A << endl; LLT lltOfA(A); // compute the Cholesky decomposition of A MatrixXd L = lltOfA.matrixL(); // retrieve factor L in the decomposition // The previous two lines can also be written as "L = A.llt().matrixL()" cout << "The Cholesky factor L is" << endl << L << endl; cout << "To check 
this, let us compute L * L.transpose()" << endl; cout << L * L.transpose() << endl; cout << "This should equal the matrix A" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/LLT_solve.cpp ================================================ typedef Matrix DataMatrix; // let's generate some samples on the 3D plane of equation z = 2x+3y (with some noise) DataMatrix samples = DataMatrix::Random(12,2); VectorXf elevations = 2*samples.col(0) + 3*samples.col(1) + VectorXf::Random(12)*0.1; // and let's solve samples * [x y]^T = elevations in least square sense: Matrix xy = (samples.adjoint() * samples).llt().solve((samples.adjoint()*elevations)); cout << xy << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/LeastSquaresNormalEquations.cpp ================================================ MatrixXf A = MatrixXf::Random(3, 2); VectorXf b = VectorXf::Random(3); cout << "The solution using normal equations is:\n" << (A.transpose() * A).ldlt().solve(A.transpose() * b) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/LeastSquaresQR.cpp ================================================ MatrixXf A = MatrixXf::Random(3, 2); VectorXf b = VectorXf::Random(3); cout << "The solution using the QR decomposition is:\n" << A.colPivHouseholderQr().solve(b) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Map_general_stride.cpp ================================================ int array[24]; for(int i = 0; i < 24; ++i) array[i] = i; cout << Map > (array, 3, 3, Stride(8, 2)) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Map_inner_stride.cpp ================================================ int array[12]; for(int i = 0; i 
< 12; ++i) array[i] = i; cout << Map > (array, 6) // the inner stride has already been passed as template parameter << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Map_outer_stride.cpp ================================================ int array[12]; for(int i = 0; i < 12; ++i) array[i] = i; cout << Map >(array, 3, 3, OuterStride<>(4)) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Map_placement_new.cpp ================================================ int data[] = {1,2,3,4,5,6,7,8,9}; Map v(data,4); cout << "The mapped vector v is: " << v << "\n"; new (&v) Map(data+4,5); cout << "Now v is: " << v << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Map_simple.cpp ================================================ int array[9]; for(int i = 0; i < 9; ++i) array[i] = i; cout << Map(array) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_adjoint.cpp ================================================ Matrix2cf m = Matrix2cf::Random(); cout << "Here is the 2x2 complex matrix m:" << endl << m << endl; cout << "Here is the adjoint of m:" << endl << m.adjoint() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_all.cpp ================================================ Vector3f boxMin(Vector3f::Zero()), boxMax(Vector3f::Ones()); Vector3f p0 = Vector3f::Random(), p1 = Vector3f::Random().cwiseAbs(); // let's check if p0 and p1 are inside the axis aligned box defined by the corners boxMin,boxMax: cout << "Is (" << p0.transpose() << ") inside the box: " << ((boxMin.array()p0.array()).all()) << endl; cout << "Is (" << p1.transpose() << ") inside the box: " << 
((boxMin.array()p1.array()).all()) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_applyOnTheLeft.cpp ================================================ Matrix3f A = Matrix3f::Random(3,3), B; B << 0,1,0, 0,0,1, 1,0,0; cout << "At start, A = " << endl << A << endl; A.applyOnTheLeft(B); cout << "After applyOnTheLeft, A = " << endl << A << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_applyOnTheRight.cpp ================================================ Matrix3f A = Matrix3f::Random(3,3), B; B << 0,1,0, 0,0,1, 1,0,0; cout << "At start, A = " << endl << A << endl; A *= B; cout << "After A *= B, A = " << endl << A << endl; A.applyOnTheRight(B); // equivalent to A *= B cout << "After applyOnTheRight, A = " << endl << A << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_array.cpp ================================================ Vector3d v(1,2,3); v.array() += 3; v.array() -= 2; cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_array_const.cpp ================================================ Vector3d v(-1,2,-3); cout << "the absolute values:" << endl << v.array().abs() << endl; cout << "the absolute values plus one:" << endl << v.array().abs()+1 << endl; cout << "sum of the squares: " << v.array().square().sum() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_asDiagonal.cpp ================================================ cout << Matrix3i(Vector3i(2,5,6).asDiagonal()) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_block_int_int.cpp 
================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.block<2,2>(1,1):" << endl << m.block<2,2>(1,1) << endl; m.block<2,2>(1,1).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_block_int_int_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.block(1, 1, 2, 2):" << endl << m.block(1, 1, 2, 2) << endl; m.block(1, 1, 2, 2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_bottomLeftCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomLeftCorner(2, 2):" << endl; cout << m.bottomLeftCorner(2, 2) << endl; m.bottomLeftCorner(2, 2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_bottomRightCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomRightCorner(2, 2):" << endl; cout << m.bottomRightCorner(2, 2) << endl; m.bottomRightCorner(2, 2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_bottomRows_int.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here 
is a.bottomRows(2):" << endl; cout << a.bottomRows(2) << endl; a.bottomRows(2).setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cast.cpp ================================================ Matrix2d md = Matrix2d::Identity() * 0.45; Matrix2f mf = Matrix2f::Identity(); cout << md + mf.cast() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_col.cpp ================================================ Matrix3d m = Matrix3d::Identity(); m.col(1) = Vector3d(4,5,6); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_colwise.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the sum of each column:" << endl << m.colwise().sum() << endl; cout << "Here is the maximum absolute value of each column:" << endl << m.cwiseAbs().colwise().maxCoeff() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_colwise_iterator_cxx11.cpp ================================================ Matrix3i m = Matrix3i::Random(); cout << "Here is the initial matrix m:" << endl << m << endl; int i = -1; for(auto c: m.colwise()) { c *= i; ++i; } cout << "Here is the matrix m after the for-range-loop:" << endl << m << endl; auto cols = m.colwise(); auto it = std::find_if(cols.cbegin(), cols.cend(), [](Matrix3i::ConstColXpr x) { return x.squaredNorm() == 0; }); cout << "The first empty column is: " << distance(cols.cbegin(),it) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_computeInverseAndDetWithCheck.cpp 
================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; Matrix3d inverse; bool invertible; double determinant; m.computeInverseAndDetWithCheck(inverse,determinant,invertible); cout << "Its determinant is " << determinant << endl; if(invertible) { cout << "It is invertible, and its inverse is:" << endl << inverse << endl; } else { cout << "It is not invertible." << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_computeInverseWithCheck.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; Matrix3d inverse; bool invertible; m.computeInverseWithCheck(inverse,invertible); if(invertible) { cout << "It is invertible, and its inverse is:" << endl << inverse << endl; } else { cout << "It is not invertible." << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseAbs.cpp ================================================ MatrixXd m(2,3); m << 2, -4, 6, -5, 1, 0; cout << m.cwiseAbs() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseAbs2.cpp ================================================ MatrixXd m(2,3); m << 2, -4, 6, -5, 1, 0; cout << m.cwiseAbs2() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseArg.cpp ================================================ MatrixXcf v = MatrixXcf::Random(2, 3); cout << v << endl << endl; cout << v.cwiseArg() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseEqual.cpp ================================================ MatrixXi m(2,2); m << 
1, 0, 1, 1; cout << "Comparing m with identity matrix:" << endl; cout << m.cwiseEqual(MatrixXi::Identity(2,2)) << endl; Index count = m.cwiseEqual(MatrixXi::Identity(2,2)).count(); cout << "Number of coefficients that are equal: " << count << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseInverse.cpp ================================================ MatrixXd m(2,3); m << 2, 0.5, 1, 3, 0.25, 1; cout << m.cwiseInverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseMax.cpp ================================================ Vector3d v(2,3,4), w(4,2,3); cout << v.cwiseMax(w) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseMin.cpp ================================================ Vector3d v(2,3,4), w(4,2,3); cout << v.cwiseMin(w) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseNotEqual.cpp ================================================ MatrixXi m(2,2); m << 1, 0, 1, 1; cout << "Comparing m with identity matrix:" << endl; cout << m.cwiseNotEqual(MatrixXi::Identity(2,2)) << endl; Index count = m.cwiseNotEqual(MatrixXi::Identity(2,2)).count(); cout << "Number of coefficients that are not equal: " << count << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseProduct.cpp ================================================ Matrix3i a = Matrix3i::Random(), b = Matrix3i::Random(); Matrix3i c = a.cwiseProduct(b); cout << "a:\n" << a << "\nb:\n" << b << "\nc:\n" << c << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseQuotient.cpp 
================================================ Vector3d v(2,3,4), w(4,2,3); cout << v.cwiseQuotient(w) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseSign.cpp ================================================ MatrixXd m(2,3); m << 2, -4, 6, -5, 1, 0; cout << m.cwiseSign() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_cwiseSqrt.cpp ================================================ Vector3d v(1,2,4); cout << v.cwiseSqrt() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_diagonal.cpp ================================================ Matrix3i m = Matrix3i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here are the coefficients on the main diagonal of m:" << endl << m.diagonal() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_diagonal_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here are the coefficients on the 1st super-diagonal and 2nd sub-diagonal of m:" << endl << m.diagonal(1).transpose() << endl << m.diagonal(-2).transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_diagonal_template_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here are the coefficients on the 1st super-diagonal and 2nd sub-diagonal of m:" << endl << m.diagonal<1>().transpose() << endl << m.diagonal<-2>().transpose() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_eigenvalues.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); VectorXcd eivals = ones.eigenvalues(); cout << "The eigenvalues of the 3x3 matrix of ones are:" << endl << eivals << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_end_int.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.tail(2):" << endl << v.tail(2) << endl; v.tail(2).setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_eval.cpp ================================================ Matrix2f M = Matrix2f::Random(); Matrix2f m; m = M; cout << "Here is the matrix m:" << endl << m << endl; cout << "Now we want to copy a column into a row." << endl; cout << "If we do m.col(1) = m.row(0), then m becomes:" << endl; m.col(1) = m.row(0); cout << m << endl << "which is wrong!" << endl; cout << "Now let us instead do m.col(1) = m.row(0).eval(). Then m becomes" << endl; m = M; m.col(1) = m.row(0).eval(); cout << m << endl << "which is right." 
<< endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_fixedBlock_int_int.cpp ================================================ Matrix4d m = Vector4d(1,2,3,4).asDiagonal(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.fixed<2, 2>(2, 2):" << endl << m.block<2, 2>(2, 2) << endl; m.block<2, 2>(2, 0) = m.block<2, 2>(2, 2); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_hnormalized.cpp ================================================ Vector4d v = Vector4d::Random(); Projective3d P(Matrix4d::Random()); cout << "v = " << v.transpose() << "]^T" << endl; cout << "v.hnormalized() = " << v.hnormalized().transpose() << "]^T" << endl; cout << "P*v = " << (P*v).transpose() << "]^T" << endl; cout << "(P*v).hnormalized() = " << (P*v).hnormalized().transpose() << "]^T" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_homogeneous.cpp ================================================ Vector3d v = Vector3d::Random(), w; Projective3d P(Matrix4d::Random()); cout << "v = [" << v.transpose() << "]^T" << endl; cout << "h.homogeneous() = [" << v.homogeneous().transpose() << "]^T" << endl; cout << "(P * v.homogeneous()) = [" << (P * v.homogeneous()).transpose() << "]^T" << endl; cout << "(P * v.homogeneous()).hnormalized() = [" << (P * v.homogeneous()).eval().hnormalized().transpose() << "]^T" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_identity.cpp ================================================ cout << Matrix::Identity() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_identity_int_int.cpp ================================================ cout << MatrixXd::Identity(4, 3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_inverse.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Its inverse is:" << endl << m.inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isDiagonal.cpp ================================================ Matrix3d m = 10000 * Matrix3d::Identity(); m(0,2) = 1; cout << "Here's the matrix m:" << endl << m << endl; cout << "m.isDiagonal() returns: " << m.isDiagonal() << endl; cout << "m.isDiagonal(1e-3) returns: " << m.isDiagonal(1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isIdentity.cpp ================================================ Matrix3d m = Matrix3d::Identity(); m(0,2) = 1e-4; cout << "Here's the matrix m:" << endl << m << endl; cout << "m.isIdentity() returns: " << m.isIdentity() << endl; cout << "m.isIdentity(1e-3) returns: " << m.isIdentity(1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isOnes.cpp ================================================ Matrix3d m = Matrix3d::Ones(); m(0,2) += 1e-4; cout << "Here's the matrix m:" << endl << m << endl; cout << "m.isOnes() returns: " << m.isOnes() << endl; cout << "m.isOnes(1e-3) returns: " << m.isOnes(1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isOrthogonal.cpp ================================================ Vector3d v(1,0,0); 
Vector3d w(1e-4,0,1); cout << "Here's the vector v:" << endl << v << endl; cout << "Here's the vector w:" << endl << w << endl; cout << "v.isOrthogonal(w) returns: " << v.isOrthogonal(w) << endl; cout << "v.isOrthogonal(w,1e-3) returns: " << v.isOrthogonal(w,1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isUnitary.cpp ================================================ Matrix3d m = Matrix3d::Identity(); m(0,2) = 1e-4; cout << "Here's the matrix m:" << endl << m << endl; cout << "m.isUnitary() returns: " << m.isUnitary() << endl; cout << "m.isUnitary(1e-3) returns: " << m.isUnitary(1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_isZero.cpp ================================================ Matrix3d m = Matrix3d::Zero(); m(0,2) = 1e-4; cout << "Here's the matrix m:" << endl << m << endl; cout << "m.isZero() returns: " << m.isZero() << endl; cout << "m.isZero(1e-3) returns: " << m.isZero(1e-3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_leftCols_int.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.leftCols(2):" << endl; cout << a.leftCols(2) << endl; a.leftCols(2).setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_noalias.cpp ================================================ Matrix2d a, b, c; a << 1,2,3,4; b << 5,6,7,8; c.noalias() = a * b; // this computes the product directly to c cout << c << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_ones.cpp 
================================================ cout << Matrix2d::Ones() << endl; cout << 6 * RowVector4i::Ones() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_ones_int.cpp ================================================ cout << 6 * RowVectorXi::Ones(4) << endl; cout << VectorXf::Ones(2) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_ones_int_int.cpp ================================================ cout << MatrixXi::Ones(2,3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_operatorNorm.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); cout << "The operator norm of the 3x3 matrix of ones is " << ones.operatorNorm() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_prod.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the product of all the coefficients:" << endl << m.prod() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_random.cpp ================================================ cout << 100 * Matrix2i::Random() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_random_int.cpp ================================================ cout << VectorXi::Random(2) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_random_int_int.cpp ================================================ cout << MatrixXi::Random(2,3) << endl; 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_replicate.cpp ================================================ MatrixXi m = MatrixXi::Random(2,3); cout << "Here is the matrix m:" << endl << m << endl; cout << "m.replicate<3,2>() = ..." << endl; cout << m.replicate<3,2>() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_replicate_int_int.cpp ================================================ Vector3i v = Vector3i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "v.replicate(2,5) = ..." << endl; cout << v.replicate(2,5) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_reshaped_auto.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped(2, AutoSize):" << endl << m.reshaped(2, AutoSize) << endl; cout << "Here is m.reshaped(AutoSize, fix<8>):" << endl << m.reshaped(AutoSize, fix<8>) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_reshaped_fixed.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped(fix<2>,fix<8>):" << endl << m.reshaped(fix<2>,fix<8>) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_reshaped_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_reshaped_to_vector.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped().transpose():" << endl << m.reshaped().transpose() << endl; cout << "Here is m.reshaped().transpose(): " << endl << m.reshaped().transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_reverse.cpp ================================================ MatrixXi m = MatrixXi::Random(3,4); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the reverse of m:" << endl << m.reverse() << endl; cout << "Here is the coefficient (1,0) in the reverse of m:" << endl << m.reverse()(1,0) << endl; cout << "Let us overwrite this coefficient with the value 4." << endl; m.reverse()(1,0) = 4; cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_rightCols_int.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.rightCols(2):" << endl; cout << a.rightCols(2) << endl; a.rightCols(2).setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_row.cpp ================================================ Matrix3d m = Matrix3d::Identity(); m.row(1) = Vector3d(4,5,6); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_rowwise.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the 
sum of each row:" << endl << m.rowwise().sum() << endl; cout << "Here is the maximum absolute value of each row:" << endl << m.cwiseAbs().rowwise().maxCoeff() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_segment_int_int.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.segment(1, 2):" << endl << v.segment(1, 2) << endl; v.segment(1, 2).setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_select.cpp ================================================ MatrixXi m(3, 3); m << 1, 2, 3, 4, 5, 6, 7, 8, 9; m = (m.array() >= 5).select(-m, m); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_selfadjointView.cpp ================================================ Matrix3i m = Matrix3i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the symmetric matrix extracted from the upper part of m:" << endl << Matrix3i(m.selfadjointView()) << endl; cout << "Here is the symmetric matrix extracted from the lower part of m:" << endl << Matrix3i(m.selfadjointView()) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_set.cpp ================================================ Matrix3i m1; m1 << 1, 2, 3, 4, 5, 6, 7, 8, 9; cout << m1 << endl << endl; Matrix3i m2 = Matrix3i::Identity(); m2.block(0,0, 2,2) << 10, 11, 12, 13; cout << m2 << endl << endl; Vector2i v1; v1 << 14, 15; m2 << v1.transpose(), 16, v1, m1.block(1,1,2,2); cout << m2 << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_setIdentity.cpp ================================================ Matrix4i m = Matrix4i::Zero(); m.block<3,3>(1,0).setIdentity(); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_setOnes.cpp ================================================ Matrix4i m = Matrix4i::Random(); m.row(1).setOnes(); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_setRandom.cpp ================================================ Matrix4i m = Matrix4i::Zero(); m.col(1).setRandom(); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_setZero.cpp ================================================ Matrix4i m = Matrix4i::Random(); m.row(1).setZero(); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_start_int.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.head(2):" << endl << v.head(2) << endl; v.head(2).setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_bottomRows.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.bottomRows<2>():" << endl; cout << a.bottomRows<2>() << endl; a.bottomRows<2>().setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_end.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.tail(2):" << endl << v.tail<2>() << endl; v.tail<2>().setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_block_int_int_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the block:" << endl << m.block<2, Dynamic>(1, 1, 2, 3) << endl; m.block<2, Dynamic>(1, 1, 2, 3).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomLeftCorner<2,2>():" << endl; cout << m.bottomLeftCorner<2,2>() << endl; m.bottomLeftCorner<2,2>().setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomLeftCorner<2,Dynamic>(2,2):" << endl; cout << m.bottomLeftCorner<2,Dynamic>(2,2) << endl; m.bottomLeftCorner<2,Dynamic>(2,2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomRightCorner<2,2>():" << endl; cout << m.bottomRightCorner<2,2>() << endl; m.bottomRightCorner<2,2>().setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_bottomRightCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.bottomRightCorner<2,Dynamic>(2,2):" << endl; cout << m.bottomRightCorner<2,Dynamic>(2,2) << endl; m.bottomRightCorner<2,Dynamic>(2,2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.topLeftCorner<2,2>():" << endl; cout << m.topLeftCorner<2,2>() << endl; m.topLeftCorner<2,2>().setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_topLeftCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.topLeftCorner<2,Dynamic>(2,2):" << endl; cout << m.topLeftCorner<2,Dynamic>(2,2) << endl; m.topLeftCorner<2,Dynamic>(2,2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.topRightCorner<2,2>():" << endl; cout << m.topRightCorner<2,2>() << endl; m.topRightCorner<2,2>().setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_int_topRightCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.topRightCorner<2,Dynamic>(2,2):" << endl; cout << m.topRightCorner<2,Dynamic>(2,2) << endl; m.topRightCorner<2,Dynamic>(2,2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_leftCols.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.leftCols<2>():" << endl; cout << a.leftCols<2>() << endl; a.leftCols<2>().setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_rightCols.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.rightCols<2>():" << endl; cout << a.rightCols<2>() << endl; a.rightCols<2>().setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_segment.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.segment<2>(1):" << endl << v.segment<2>(1) << endl; v.segment<2>(2).setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_start.cpp ================================================ RowVector4i v = RowVector4i::Random(); cout << "Here is the vector v:" << endl << v << endl; cout << "Here is v.head(2):" << endl << v.head<2>() << endl; v.head<2>().setZero(); cout << "Now the vector v is:" << endl << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_template_int_topRows.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.topRows<2>():" << endl; cout << a.topRows<2>() << endl; a.topRows<2>().setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_topLeftCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.topLeftCorner(2, 2):" << endl; cout << m.topLeftCorner(2, 2) << endl; m.topLeftCorner(2, 2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_topRightCorner_int_int.cpp ================================================ Matrix4i m = Matrix4i::Random(); cout << "Here is the matrix m:" << 
endl << m << endl; cout << "Here is m.topRightCorner(2, 2):" << endl; cout << m.topRightCorner(2, 2) << endl; m.topRightCorner(2, 2).setZero(); cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_topRows_int.cpp ================================================ Array44i a = Array44i::Random(); cout << "Here is the array a:" << endl << a << endl; cout << "Here is a.topRows(2):" << endl; cout << a.topRows(2) << endl; a.topRows(2).setZero(); cout << "Now the array a is:" << endl << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_transpose.cpp ================================================ Matrix2i m = Matrix2i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the transpose of m:" << endl << m.transpose() << endl; cout << "Here is the coefficient (1,0) in the transpose of m:" << endl << m.transpose()(1,0) << endl; cout << "Let us overwrite this coefficient with the value 0." 
<< endl; m.transpose()(1,0) = 0; cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_triangularView.cpp ================================================ Matrix3i m = Matrix3i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the upper-triangular matrix extracted from m:" << endl << Matrix3i(m.triangularView()) << endl; cout << "Here is the strictly-upper-triangular matrix extracted from m:" << endl << Matrix3i(m.triangularView()) << endl; cout << "Here is the unit-lower-triangular matrix extracted from m:" << endl << Matrix3i(m.triangularView()) << endl; // FIXME need to implement output for triangularViews (Bug 885) ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_zero.cpp ================================================ cout << Matrix2d::Zero() << endl; cout << RowVector4i::Zero() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_zero_int.cpp ================================================ cout << RowVectorXi::Zero(4) << endl; cout << VectorXf::Zero(2) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/MatrixBase_zero_int_int.cpp ================================================ cout << MatrixXi::Zero(2,3) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_Map_stride.cpp ================================================ Matrix4i A; A << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16; std::cout << Matrix2i::Map(&A(1,1),Stride<8,2>()) << std::endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_initializer_list_23_cxx11.cpp ================================================ MatrixXd m { {1, 2, 3}, {4, 5, 6} }; cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp ================================================ VectorXi v {{1, 2}}; cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_resize_NoChange_int.cpp ================================================ MatrixXd m(3,4); m.resize(NoChange, 5); cout << "m: " << m.rows() << " rows, " << m.cols() << " cols" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_resize_int.cpp ================================================ VectorXd v(10); v.resize(3); RowVector3d w; w.resize(3); // this is legal, but has no effect cout << "v: " << v.rows() << " rows, " << v.cols() << " cols" << endl; cout << "w: " << w.rows() << " rows, " << w.cols() << " cols" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_resize_int_NoChange.cpp ================================================ MatrixXd m(3,4); m.resize(5, NoChange); cout << "m: " << m.rows() << " rows, " << m.cols() << " cols" << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_resize_int_int.cpp ================================================ MatrixXd m(2,3); m << 1,2,3,4,5,6; cout << "here's the 2x3 matrix m:" << endl << m << endl; cout << "let's resize m to 3x2. This is a conservative resizing because 2*3==3*2." << endl; m.resize(3,2); cout << "here's the 3x2 matrix m:" << endl << m << endl; cout << "now let's resize m to size 2x2. 
This is NOT a conservative resizing, so it becomes uninitialized:" << endl; m.resize(2,2); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setConstant_int.cpp ================================================ VectorXf v; v.setConstant(3, 5); cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setConstant_int_int.cpp ================================================ MatrixXf m; m.setConstant(3, 3, 5); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setIdentity_int_int.cpp ================================================ MatrixXf m; m.setIdentity(3, 3); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setOnes_int.cpp ================================================ VectorXf v; v.setOnes(3); cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setOnes_int_int.cpp ================================================ MatrixXf m; m.setOnes(3, 3); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setRandom_int.cpp ================================================ VectorXf v; v.setRandom(3); cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setRandom_int_int.cpp ================================================ MatrixXf m; m.setRandom(3, 3); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setZero_int.cpp 
================================================ VectorXf v; v.setZero(3); cout << v << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_setZero_int_int.cpp ================================================ MatrixXf m; m.setZero(3, 3); cout << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Matrix_variadic_ctor_cxx11.cpp ================================================ Matrix a(1, 2, 3, 4, 5, 6); Matrix b {1, 2, 3}; cout << a << "\n\n" << b << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialPivLU_solve.cpp ================================================ MatrixXd A = MatrixXd::Random(3,3); MatrixXd B = MatrixXd::Random(3,2); cout << "Here is the invertible matrix A:" << endl << A << endl; cout << "Here is the matrix B:" << endl << B << endl; MatrixXd X = A.lu().solve(B); cout << "Here is the (unique) solution X to the equation AX=B:" << endl << X << endl; cout << "Relative error: " << (A*X-B).norm() / B.norm() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_count.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; Matrix res = (m.array() >= 0.5).rowwise().count(); cout << "Here is the count of elements larger or equal than 0.5 of each row:" << endl; cout << res << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_maxCoeff.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the maximum of each column:" << endl << m.colwise().maxCoeff() << endl; 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_minCoeff.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the minimum of each column:" << endl << m.colwise().minCoeff() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_norm.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the norm of each column:" << endl << m.colwise().norm() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_prod.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the product of each row:" << endl << m.rowwise().prod() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_squaredNorm.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the square norm of each row:" << endl << m.rowwise().squaredNorm() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/PartialRedux_sum.cpp ================================================ Matrix3d m = Matrix3d::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the sum of each row:" << endl << m.rowwise().sum() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/RealQZ_compute.cpp ================================================ 
MatrixXf A = MatrixXf::Random(4,4); MatrixXf B = MatrixXf::Random(4,4); RealQZ qz(4); // preallocate space for 4x4 matrices qz.compute(A,B); // A = Q S Z, B = Q T Z // print original matrices and result of decomposition cout << "A:\n" << A << "\n" << "B:\n" << B << "\n"; cout << "S:\n" << qz.matrixS() << "\n" << "T:\n" << qz.matrixT() << "\n"; cout << "Q:\n" << qz.matrixQ() << "\n" << "Z:\n" << qz.matrixZ() << "\n"; // verify precision cout << "\nErrors:" << "\n|A-QSZ|: " << (A-qz.matrixQ()*qz.matrixS()*qz.matrixZ()).norm() << ", |B-QTZ|: " << (B-qz.matrixQ()*qz.matrixT()*qz.matrixZ()).norm() << "\n|QQ* - I|: " << (qz.matrixQ()*qz.matrixQ().adjoint() - MatrixXf::Identity(4,4)).norm() << ", |ZZ* - I|: " << (qz.matrixZ()*qz.matrixZ().adjoint() - MatrixXf::Identity(4,4)).norm() << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/RealSchur_RealSchur_MatrixType.cpp ================================================ MatrixXd A = MatrixXd::Random(6,6); cout << "Here is a random 6x6 matrix, A:" << endl << A << endl << endl; RealSchur schur(A); cout << "The orthogonal matrix U is:" << endl << schur.matrixU() << endl; cout << "The quasi-triangular matrix T is:" << endl << schur.matrixT() << endl << endl; MatrixXd U = schur.matrixU(); MatrixXd T = schur.matrixT(); cout << "U * T * U^T = " << endl << U * T * U.transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/RealSchur_compute.cpp ================================================ MatrixXf A = MatrixXf::Random(4,4); RealSchur schur(4); schur.compute(A, /* computeU = */ false); cout << "The matrix T in the decomposition of A is:" << endl << schur.matrixT() << endl; schur.compute(A.inverse(), /* computeU = */ false); cout << "The matrix T in the decomposition of A^(-1) is:" << endl << schur.matrixT() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp ================================================ SelfAdjointEigenSolver es; Matrix4f X = Matrix4f::Random(4,4); Matrix4f A = X + X.transpose(); es.compute(A); cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl; es.compute(A + Matrix4f::Identity(4,4)); // re-use es to compute eigenvalues of A+I cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp ================================================ MatrixXd X = MatrixXd::Random(5,5); MatrixXd A = X + X.transpose(); cout << "Here is a random symmetric 5x5 matrix, A:" << endl << A << endl << endl; SelfAdjointEigenSolver es(A); cout << "The eigenvalues of A are:" << endl << es.eigenvalues() << endl; cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl; double lambda = es.eigenvalues()[0]; cout << "Consider the first eigenvalue, lambda = " << lambda << endl; VectorXd v = es.eigenvectors().col(0); cout << "If v is the corresponding eigenvector, then lambda * v = " << endl << lambda * v << endl; cout << "... 
and A * v = " << endl << A * v << endl << endl; MatrixXd D = es.eigenvalues().asDiagonal(); MatrixXd V = es.eigenvectors(); cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp ================================================ MatrixXd X = MatrixXd::Random(5,5); MatrixXd A = X + X.transpose(); cout << "Here is a random symmetric matrix, A:" << endl << A << endl; X = MatrixXd::Random(5,5); MatrixXd B = X * X.transpose(); cout << "and a random positive-definite matrix, B:" << endl << B << endl << endl; GeneralizedSelfAdjointEigenSolver es(A,B); cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl; cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl; double lambda = es.eigenvalues()[0]; cout << "Consider the first eigenvalue, lambda = " << lambda << endl; VectorXd v = es.eigenvectors().col(0); cout << "If v is the corresponding eigenvector, then A * v = " << endl << A * v << endl; cout << "... 
and lambda * B * v = " << endl << lambda * B * v << endl << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType.cpp ================================================ SelfAdjointEigenSolver es(4); MatrixXf X = MatrixXf::Random(4,4); MatrixXf A = X + X.transpose(); es.compute(A); cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl; es.compute(A + MatrixXf::Identity(4,4)); // re-use es to compute eigenvalues of A+I cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_compute_MatrixType2.cpp ================================================ MatrixXd X = MatrixXd::Random(5,5); MatrixXd A = X * X.transpose(); X = MatrixXd::Random(5,5); MatrixXd B = X * X.transpose(); GeneralizedSelfAdjointEigenSolver es(A,B,EigenvaluesOnly); cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl; es.compute(B,A,false); cout << "The eigenvalues of the pencil (B,A) are:" << endl << es.eigenvalues() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); SelfAdjointEigenSolver es(ones); cout << "The eigenvalues of the 3x3 matrix of ones are:" << endl << es.eigenvalues() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); SelfAdjointEigenSolver es(ones); cout << "The first eigenvector of the 3x3 matrix of ones is:" << endl << es.eigenvectors().col(0) << endl; 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_operatorInverseSqrt.cpp ================================================ MatrixXd X = MatrixXd::Random(4,4); MatrixXd A = X * X.transpose(); cout << "Here is a random positive-definite matrix, A:" << endl << A << endl << endl; SelfAdjointEigenSolver es(A); cout << "The inverse square root of A is: " << endl; cout << es.operatorInverseSqrt() << endl; cout << "We can also compute it with operatorSqrt() and inverse(). That yields: " << endl; cout << es.operatorSqrt().inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp ================================================ MatrixXd X = MatrixXd::Random(4,4); MatrixXd A = X * X.transpose(); cout << "Here is a random positive-definite matrix, A:" << endl << A << endl << endl; SelfAdjointEigenSolver es(A); MatrixXd sqrtA = es.operatorSqrt(); cout << "The square root of A is: " << endl << sqrtA << endl; cout << "If we square this, we get: " << endl << sqrtA*sqrtA << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointView_eigenvalues.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); VectorXd eivals = ones.selfadjointView().eigenvalues(); cout << "The eigenvalues of the 3x3 matrix of ones are:" << endl << eivals << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SelfAdjointView_operatorNorm.cpp ================================================ MatrixXd ones = MatrixXd::Ones(3,3); cout << "The operator norm of the 3x3 matrix of ones is " << ones.selfadjointView().operatorNorm() << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Slicing_arrayexpr.cpp ================================================ ArrayXi ind(5); ind<<4,2,5,5,3; MatrixXi A = MatrixXi::Random(4,6); cout << "Initial matrix A:\n" << A << "\n\n"; cout << "A(all,ind-1):\n" << A(all,ind-1) << "\n\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Slicing_custom_padding_cxx11.cpp ================================================ struct pad { Index size() const { return out_size; } Index operator[] (Index i) const { return std::max(0,i-(out_size-in_size)); } Index in_size, out_size; }; Matrix3i A; A.reshaped() = VectorXi::LinSpaced(9,1,9); cout << "Initial matrix A:\n" << A << "\n\n"; MatrixXi B(5,5); B = A(pad{3,5}, pad{3,5}); cout << "A(pad{3,N}, pad{3,N}):\n" << B << "\n\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Slicing_rawarray_cxx11.cpp ================================================ #if EIGEN_HAS_STATIC_ARRAY_TEMPLATE MatrixXi A = MatrixXi::Random(4,6); cout << "Initial matrix A:\n" << A << "\n\n"; cout << "A(all,{4,2,5,5,3}):\n" << A(all,{4,2,5,5,3}) << "\n\n"; #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Slicing_stdvector_cxx11.cpp ================================================ std::vector ind{4,2,5,5,3}; MatrixXi A = MatrixXi::Random(4,6); cout << "Initial matrix A:\n" << A << "\n\n"; cout << "A(all,ind):\n" << A(all,ind) << "\n\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/SparseMatrix_coeffs.cpp ================================================ SparseMatrix A(3,3); A.insert(1,2) = 0; A.insert(0,1) = 1; A.insert(2,0) = 2; A.makeCompressed(); cout << "The matrix A is:" << endl << MatrixXd(A) << endl; cout << "it has " << A.nonZeros() << " stored non zero 
coefficients that are: " << A.coeffs().transpose() << endl; A.coeffs() += 10; cout << "After adding 10 to every stored non zero coefficient, the matrix A is:" << endl << MatrixXd(A) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_block.cpp ================================================ MatrixXi mat(3,3); mat << 1, 2, 3, 4, 5, 6, 7, 8, 9; cout << "Here is the matrix mat:\n" << mat << endl; // This assignment shows the aliasing problem mat.bottomRightCorner(2,2) = mat.topLeftCorner(2,2); cout << "After the assignment, mat = \n" << mat << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_block_correct.cpp ================================================ MatrixXi mat(3,3); mat << 1, 2, 3, 4, 5, 6, 7, 8, 9; cout << "Here is the matrix mat:\n" << mat << endl; // The eval() solves the aliasing problem mat.bottomRightCorner(2,2) = mat.topLeftCorner(2,2).eval(); cout << "After the assignment, mat = \n" << mat << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_cwise.cpp ================================================ MatrixXf mat(2,2); mat << 1, 2, 4, 7; cout << "Here is the matrix mat:\n" << mat << endl << endl; mat = 2 * mat; cout << "After 'mat = 2 * mat', mat = \n" << mat << endl << endl; mat = mat - MatrixXf::Identity(2,2); cout << "After the subtraction, it becomes\n" << mat << endl << endl; ArrayXXf arr = mat; arr = arr.square(); cout << "After squaring, it becomes\n" << arr << endl << endl; // Combining all operations in one statement: mat << 1, 2, 4, 7; mat = (2 * mat - MatrixXf::Identity(2,2)).array().square(); cout << "Doing everything at once yields\n" << mat << endl << endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_mult1.cpp ================================================ MatrixXf matA(2,2); matA << 2, 0, 0, 2; matA = matA * matA; cout << matA; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_mult2.cpp ================================================ MatrixXf matA(2,2), matB(2,2); matA << 2, 0, 0, 2; // Simple but not quite as efficient matB = matA * matA; cout << matB << endl << endl; // More complicated but also more efficient matB.noalias() = matA * matA; cout << matB; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_mult3.cpp ================================================ MatrixXf matA(2,2); matA << 2, 0, 0, 2; matA.noalias() = matA * matA; cout << matA; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_mult4.cpp ================================================ MatrixXf A(2,2), B(3,2); B << 2, 0, 0, 3, 1, 1; A << 2, 0, 0, -2; A = (B * A).cwiseAbs(); cout << A; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicAliasing_mult5.cpp ================================================ MatrixXf A(2,2), B(3,2); B << 2, 0, 0, 3, 1, 1; A << 2, 0, 0, -2; A = (B * A).eval().cwiseAbs(); cout << A; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/TopicStorageOrders_example.cpp ================================================ Matrix Acolmajor; Acolmajor << 8, 2, 2, 9, 9, 1, 4, 4, 3, 5, 4, 5; cout << "The matrix A:" << endl; cout << Acolmajor << endl << endl; cout << "In memory (column-major):" << endl; for (int i = 0; i < Acolmajor.size(); i++) cout << *(Acolmajor.data() + i) << " "; cout << endl << endl; Matrix Arowmajor = Acolmajor; 
cout << "In memory (row-major):" << endl; for (int i = 0; i < Arowmajor.size(); i++) cout << *(Arowmajor.data() + i) << " "; cout << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Triangular_solve.cpp ================================================ Matrix3d m = Matrix3d::Zero(); m.triangularView().setOnes(); cout << "Here is the matrix m:\n" << m << endl; Matrix3d n = Matrix3d::Ones(); n.triangularView() *= 2; cout << "Here is the matrix n:\n" << n << endl; cout << "And now here is m.inverse()*n, taking advantage of the fact that" " m is upper-triangular:\n" << m.triangularView().solve(n) << endl; cout << "And this is n*m.inverse():\n" << m.triangularView().solve(n); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_Tridiagonalization_MatrixType.cpp ================================================ MatrixXd X = MatrixXd::Random(5,5); MatrixXd A = X + X.transpose(); cout << "Here is a random symmetric 5x5 matrix:" << endl << A << endl << endl; Tridiagonalization triOfA(A); MatrixXd Q = triOfA.matrixQ(); cout << "The orthogonal matrix Q is:" << endl << Q << endl; MatrixXd T = triOfA.matrixT(); cout << "The tridiagonal matrix T is:" << endl << T << endl << endl; cout << "Q * T * Q^T = " << endl << Q * T * Q.transpose() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_compute.cpp ================================================ Tridiagonalization tri; MatrixXf X = MatrixXf::Random(4,4); MatrixXf A = X + X.transpose(); tri.compute(A); cout << "The matrix T in the tridiagonal decomposition of A is: " << endl; cout << tri.matrixT() << endl; tri.compute(2*A); // re-use tri to compute eigenvalues of 2A cout << "The matrix T in the tridiagonal decomposition of 2A is: " << endl; cout << tri.matrixT() << endl; 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_decomposeInPlace.cpp ================================================ MatrixXd X = MatrixXd::Random(5,5); MatrixXd A = X + X.transpose(); cout << "Here is a random symmetric 5x5 matrix:" << endl << A << endl << endl; VectorXd diag(5); VectorXd subdiag(4); VectorXd hcoeffs(4); // Scratch space for householder reflector. internal::tridiagonalization_inplace(A, diag, subdiag, hcoeffs, true); cout << "The orthogonal matrix Q is:" << endl << A << endl; cout << "The diagonal of the tridiagonal matrix T is:" << endl << diag << endl; cout << "The subdiagonal of the tridiagonal matrix T is:" << endl << subdiag << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_diagonal.cpp ================================================ MatrixXcd X = MatrixXcd::Random(4,4); MatrixXcd A = X + X.adjoint(); cout << "Here is a random self-adjoint 4x4 matrix:" << endl << A << endl << endl; Tridiagonalization triOfA(A); MatrixXd T = triOfA.matrixT(); cout << "The tridiagonal matrix T is:" << endl << T << endl << endl; cout << "We can also extract the diagonals of T directly ..." 
<< endl; VectorXd diag = triOfA.diagonal(); cout << "The diagonal is:" << endl << diag << endl; VectorXd subdiag = triOfA.subDiagonal(); cout << "The subdiagonal is:" << endl << subdiag << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_householderCoefficients.cpp ================================================ Matrix4d X = Matrix4d::Random(4,4); Matrix4d A = X + X.transpose(); cout << "Here is a random symmetric 4x4 matrix:" << endl << A << endl; Tridiagonalization triOfA(A); Vector3d hc = triOfA.householderCoefficients(); cout << "The vector of Householder coefficients is:" << endl << hc << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tridiagonalization_packedMatrix.cpp ================================================ Matrix4d X = Matrix4d::Random(4,4); Matrix4d A = X + X.transpose(); cout << "Here is a random symmetric 4x4 matrix:" << endl << A << endl; Tridiagonalization triOfA(A); Matrix4d pm = triOfA.packedMatrix(); cout << "The packed matrix M is:" << endl << pm << endl; cout << "The diagonal and subdiagonal corresponds to the matrix T, which is:" << endl << triOfA.matrixT() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_Block.cpp ================================================ MatrixXf matA(2, 2); matA << 1, 2, 3, 4; MatrixXf matB(4, 4); matB << matA, matA/10, matA/10, matA; std::cout << matB << std::endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_CommaTemporary.cpp ================================================ MatrixXf mat = MatrixXf::Random(2, 3); std::cout << mat << std::endl << std::endl; mat = (MatrixXf(2,2) << 0, 1, 1, 0).finished() * mat; std::cout << mat << 
std::endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp ================================================ RowVectorXd vec1(3); vec1 << 1, 2, 3; std::cout << "vec1 = " << vec1 << std::endl; RowVectorXd vec2(4); vec2 << 1, 4, 9, 16; std::cout << "vec2 = " << vec2 << std::endl; RowVectorXd joined(7); joined << vec1, vec2; std::cout << "joined = " << joined << std::endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_LinSpaced.cpp ================================================ ArrayXXf table(10, 4); table.col(0) = ArrayXf::LinSpaced(10, 0, 90); table.col(1) = M_PI / 180 * table.col(0); table.col(2) = table.col(1).sin(); table.col(3) = table.col(1).cos(); std::cout << " Degrees Radians Sine Cosine\n"; std::cout << table << std::endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_ThreeWays.cpp ================================================ const int size = 6; MatrixXd mat1(size, size); mat1.topLeftCorner(size/2, size/2) = MatrixXd::Zero(size/2, size/2); mat1.topRightCorner(size/2, size/2) = MatrixXd::Identity(size/2, size/2); mat1.bottomLeftCorner(size/2, size/2) = MatrixXd::Identity(size/2, size/2); mat1.bottomRightCorner(size/2, size/2) = MatrixXd::Zero(size/2, size/2); std::cout << mat1 << std::endl << std::endl; MatrixXd mat2(size, size); mat2.topLeftCorner(size/2, size/2).setZero(); mat2.topRightCorner(size/2, size/2).setIdentity(); mat2.bottomLeftCorner(size/2, size/2).setIdentity(); mat2.bottomRightCorner(size/2, size/2).setZero(); std::cout << mat2 << std::endl << std::endl; MatrixXd mat3(size, size); mat3 << MatrixXd::Zero(size/2, size/2), MatrixXd::Identity(size/2, size/2), MatrixXd::Identity(size/2, size/2), MatrixXd::Zero(size/2, size/2); 
std::cout << mat3 << std::endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_AdvancedInitialization_Zero.cpp ================================================ std::cout << "A fixed-size array:\n"; Array33f a1 = Array33f::Zero(); std::cout << a1 << "\n\n"; std::cout << "A one-dimensional dynamic-size array:\n"; ArrayXf a2 = ArrayXf::Zero(3); std::cout << a2 << "\n\n"; std::cout << "A two-dimensional dynamic-size array:\n"; ArrayXXf a3 = ArrayXXf::Zero(3, 4); std::cout << a3 << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_Map_rowmajor.cpp ================================================ int array[8]; for(int i = 0; i < 8; ++i) array[i] = i; cout << "Column-major:\n" << Map >(array) << endl; cout << "Row-major:\n" << Map >(array) << endl; cout << "Row-major using stride:\n" << Map, Unaligned, Stride<1,4> >(array) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_Map_using.cpp ================================================ typedef Matrix MatrixType; typedef Map MapType; typedef Map MapTypeConst; // a read-only map const int n_dims = 5; MatrixType m1(n_dims), m2(n_dims); m1.setRandom(); m2.setRandom(); float *p = &m2(0); // get the address storing the data for m2 MapType m2map(p,m2.size()); // m2map shares data with m2 MapTypeConst m2mapconst(p,m2.size()); // a read-only accessor for m2 cout << "m1: " << m1 << endl; cout << "m2: " << m2 << endl; cout << "Squared euclidean distance: " << (m1-m2).squaredNorm() << endl; cout << "Squared euclidean distance, using map: " << (m1-m2map).squaredNorm() << endl; m2map(3) = 7; // this will change m2, since they share the same array cout << "Updated m2: " << m2 << endl; cout << "m2 coefficient 2, constant accessor: " << m2mapconst(2) << endl; /* m2mapconst(2) = 5; */ // this yields 
a compile-time error ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_ReshapeMat2Mat.cpp ================================================ MatrixXf M1(2,6); // Column-major storage M1 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12; Map M2(M1.data(), 6,2); cout << "M2:" << endl << M2 << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_ReshapeMat2Vec.cpp ================================================ MatrixXf M1(3,3); // Column-major storage M1 << 1, 2, 3, 4, 5, 6, 7, 8, 9; Map v1(M1.data(), M1.size()); cout << "v1:" << endl << v1 << endl; Matrix M2(M1); Map v2(M2.data(), M2.size()); cout << "v2:" << endl << v2 << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_SlicingCol.cpp ================================================ MatrixXf M1 = MatrixXf::Random(3,8); cout << "Column major input:" << endl << M1 << "\n"; Map > M2(M1.data(), M1.rows(), (M1.cols()+2)/3, OuterStride<>(M1.outerStride()*3)); cout << "1 column over 3:" << endl << M2 << "\n"; typedef Matrix RowMajorMatrixXf; RowMajorMatrixXf M3(M1); cout << "Row major input:" << endl << M3 << "\n"; Map > M4(M3.data(), M3.rows(), (M3.cols()+2)/3, Stride(M3.outerStride(),3)); cout << "1 column over 3:" << endl << M4 << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_SlicingVec.cpp ================================================ RowVectorXf v = RowVectorXf::LinSpaced(20,0,19); cout << "Input:" << endl << v << endl; Map > v2(v.data(), v.size()/2); cout << "Even:" << v2 << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_commainit_01.cpp ================================================ Matrix3f m; m << 1, 2, 3, 4, 5, 6, 
7, 8, 9; std::cout << m; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_commainit_01b.cpp ================================================ Matrix3f m; m.row(0) << 1, 2, 3; m.block(1,0,2,2) << 4, 5, 7, 8; m.col(2).tail(2) << 6, 9; std::cout << m; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_commainit_02.cpp ================================================ int rows=5, cols=5; MatrixXf m(rows,cols); m << (Matrix3f() << 1, 2, 3, 4, 5, 6, 7, 8, 9).finished(), MatrixXf::Zero(3,cols-3), MatrixXf::Zero(rows-3,3), MatrixXf::Identity(rows-3,cols-3); cout << m; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_range_for_loop_1d_cxx11.cpp ================================================ VectorXi v = VectorXi::Random(4); cout << "Here is the vector v:\n"; for(auto x : v) cout << x << " "; cout << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_range_for_loop_2d_cxx11.cpp ================================================ Matrix2i A = Matrix2i::Random(); cout << "Here are the coeffs of the 2x2 matrix A:\n"; for(auto x : A.reshaped()) cout << x << " "; cout << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_reshaped_vs_resize_1.cpp ================================================ MatrixXi m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl; m.resize(2,8); cout << "Here is the matrix m after m.resize(2,8):" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_reshaped_vs_resize_2.cpp 
================================================ Matrix m = Matrix4i::Random(); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl; cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl; m.resize(2,8); cout << "Here is the matrix m after m.resize(2,8):" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_matrix_inverse.cpp ================================================ Matrix3f A; Vector3f b; A << 1,2,3, 4,5,6, 7,8,10; b << 3, 3, 4; Vector3f x = A.inverse() * b; cout << "The solution is:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_multiple_rhs.cpp ================================================ Matrix3f A(3,3); A << 1,2,3, 4,5,6, 7,8,10; Matrix B; B << 3,1, 3,1, 4,1; Matrix X; X = A.fullPivLu().solve(B); cout << "The solution with right-hand side (3,3,4) is:" << endl; cout << X.col(0) << endl; cout << "The solution with right-hand side (1,1,1) is:" << endl; cout << X.col(1) << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_reuse_decomposition.cpp ================================================ Matrix3f A(3,3); A << 1,2,3, 4,5,6, 7,8,10; PartialPivLU luOfA(A); // compute LU decomposition of A Vector3f b; b << 3,3,4; Vector3f x; x = luOfA.solve(b); cout << "The solution with right-hand side (3,3,4) is:" << endl; cout << x << endl; b << 1,1,1; x = luOfA.solve(b); cout << "The solution with right-hand side (1,1,1) is:" << endl; cout << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_singular.cpp ================================================ Matrix3f A; Vector3f b; A << 1,2,3, 
4,5,6, 7,8,9; b << 3, 3, 4; cout << "Here is the matrix A:" << endl << A << endl; cout << "Here is the vector b:" << endl << b << endl; Vector3f x; x = A.lu().solve(b); cout << "The solution is:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_triangular.cpp ================================================ Matrix3f A; Vector3f b; A << 1,2,3, 0,5,6, 0,0,10; b << 3, 3, 4; cout << "Here is the matrix A:" << endl << A << endl; cout << "Here is the vector b:" << endl << b << endl; Vector3f x = A.triangularView().solve(b); cout << "The solution is:" << endl << x << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_solve_triangular_inplace.cpp ================================================ Matrix3f A; Vector3f b; A << 1,2,3, 0,5,6, 0,0,10; b << 3, 3, 4; A.triangularView().solveInPlace(b); cout << "The solution is:" << endl << b << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_std_sort.cpp ================================================ Array4i v = Array4i::Random().abs(); cout << "Here is the initial vector v:\n" << v.transpose() << "\n"; std::sort(v.begin(), v.end()); cout << "Here is the sorted vector v:\n" << v.transpose() << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp ================================================ ArrayXXi A = ArrayXXi::Random(4,4).abs(); cout << "Here is the initial matrix A:\n" << A << "\n"; for(auto row : A.rowwise()) std::sort(row.begin(), row.end()); cout << "Here is the sorted matrix A:\n" << A << "\n"; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/VectorwiseOp_homogeneous.cpp 
================================================ Matrix3Xd M = Matrix3Xd::Random(3,5); Projective3d P(Matrix4d::Random()); cout << "The matrix M is:" << endl << M << endl << endl; cout << "M.colwise().homogeneous():" << endl << M.colwise().homogeneous() << endl << endl; cout << "P * M.colwise().homogeneous():" << endl << P * M.colwise().homogeneous() << endl << endl; cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/Vectorwise_reverse.cpp ================================================ MatrixXi m = MatrixXi::Random(3,4); cout << "Here is the matrix m:" << endl << m << endl; cout << "Here is the rowwise reverse of m:" << endl << m.rowwise().reverse() << endl; cout << "Here is the colwise reverse of m:" << endl << m.colwise().reverse() << endl; cout << "Here is the coefficient (1,0) in the rowise reverse of m:" << endl << m.rowwise().reverse()(1,0) << endl; cout << "Let us overwrite this coefficient with the value 4." 
<< endl; //m.colwise().reverse()(1,0) = 4; cout << "Now the matrix m is:" << endl << m << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/class_FullPivLU.cpp ================================================ typedef Matrix Matrix5x3; typedef Matrix Matrix5x5; Matrix5x3 m = Matrix5x3::Random(); cout << "Here is the matrix m:" << endl << m << endl; Eigen::FullPivLU lu(m); cout << "Here is, up to permutations, its LU decomposition matrix:" << endl << lu.matrixLU() << endl; cout << "Here is the L part:" << endl; Matrix5x5 l = Matrix5x5::Identity(); l.block<5,3>(0,0).triangularView() = lu.matrixLU(); cout << l << endl; cout << "Here is the U part:" << endl; Matrix5x3 u = lu.matrixLU().triangularView(); cout << u << endl; cout << "Let us now reconstruct the original matrix m:" << endl; cout << lu.permutationP().inverse() * l * u * lu.permutationQ().inverse() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/compile_snippet.cpp.in ================================================ static bool eigen_did_assert = false; #define eigen_assert(X) if(!eigen_did_assert && !(X)){ std::cout << "### Assertion raised in " << __FILE__ << ":" << __LINE__ << ":\n" #X << "\n### The following would happen without assertions:\n"; eigen_did_assert = true;} #include #include #ifndef M_PI #define M_PI 3.1415926535897932384626433832795 #endif using namespace Eigen; using namespace std; int main(int, char**) { cout.precision(3); // intentionally remove indentation of snippet { ${snippet_source_code} } return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/tut_arithmetic_redux_minmax.cpp ================================================ Matrix3f m = Matrix3f::Random(); std::ptrdiff_t i, j; float minOfM = m.minCoeff(&i,&j); cout << "Here is the matrix m:\n" << m << endl; cout << 
"Its minimum coefficient (" << minOfM << ") is at position (" << i << "," << j << ")\n\n"; RowVector4i v = RowVector4i::Random(); int maxOfV = v.maxCoeff(&i); cout << "Here is the vector v: " << v << endl; cout << "Its maximum coefficient (" << maxOfV << ") is at position " << i << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/tut_arithmetic_transpose_aliasing.cpp ================================================ Matrix2i a; a << 1, 2, 3, 4; cout << "Here is the matrix a:\n" << a << endl; a = a.transpose(); // !!! do NOT do this !!! cout << "and the result of the aliasing effect:\n" << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/tut_arithmetic_transpose_conjugate.cpp ================================================ MatrixXcf a = MatrixXcf::Random(2,2); cout << "Here is the matrix a\n" << a << endl; cout << "Here is the matrix a^T\n" << a.transpose() << endl; cout << "Here is the conjugate of a\n" << a.conjugate() << endl; cout << "Here is the matrix a^*\n" << a.adjoint() << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/tut_arithmetic_transpose_inplace.cpp ================================================ MatrixXf a(2,3); a << 1, 2, 3, 4, 5, 6; cout << "Here is the initial matrix a:\n" << a << endl; a.transposeInPlace(); cout << "and after being transposed:\n" << a << endl; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/snippets/tut_matrix_assignment_resizing.cpp ================================================ MatrixXf a(2,2); std::cout << "a is of size " << a.rows() << "x" << a.cols() << std::endl; MatrixXf b(3,3); a = b; std::cout << "a is now of size " << a.rows() << "x" << a.cols() << std::endl; ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/doc/special_examples/Tutorial_sparse_example.cpp ================================================ #include #include #include typedef Eigen::SparseMatrix SpMat; // declares a column-major sparse matrix type of double typedef Eigen::Triplet T; void buildProblem(std::vector& coefficients, Eigen::VectorXd& b, int n); void saveAsBitmap(const Eigen::VectorXd& x, int n, const char* filename); int main(int argc, char** argv) { if(argc!=2) { std::cerr << "Error: expected one and only one argument.\n"; return -1; } int n = 300; // size of the image int m = n*n; // number of unknowns (=number of pixels) // Assembly: std::vector coefficients; // list of non-zeros coefficients Eigen::VectorXd b(m); // the right hand side-vector resulting from the constraints buildProblem(coefficients, b, n); SpMat A(m,m); A.setFromTriplets(coefficients.begin(), coefficients.end()); // Solving: Eigen::SimplicialCholesky chol(A); // performs a Cholesky factorization of A Eigen::VectorXd x = chol.solve(b); // use the factorization to solve for the given right hand side // Export the result to a file: saveAsBitmap(x, n, argv[1]); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp ================================================ #include #include #include typedef Eigen::SparseMatrix SpMat; // declares a column-major sparse matrix type of double typedef Eigen::Triplet T; void insertCoefficient(int id, int i, int j, double w, std::vector& coeffs, Eigen::VectorXd& b, const Eigen::VectorXd& boundary) { int n = int(boundary.size()); int id1 = i+j*n; if(i==-1 || i==n) b(id) -= w * boundary(j); // constrained coefficient else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coefficient else coeffs.push_back(T(id,id1,w)); // unknown coefficient } void buildProblem(std::vector& coefficients, Eigen::VectorXd& b, int n) { 
b.setZero(); Eigen::ArrayXd boundary = Eigen::ArrayXd::LinSpaced(n, 0,M_PI).sin().pow(2); for(int j=0; j bits = (x*255).cast(); QImage img(bits.data(), n,n,QImage::Format_Indexed8); img.setColorCount(256); for(int i=0;i<256;i++) img.setColor(i,qRgb(i,i,i)); img.save(filename); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/special_examples/random_cpp11.cpp ================================================ #include #include #include using namespace Eigen; int main() { std::default_random_engine generator; std::poisson_distribution distribution(4.1); auto poisson = [&] () {return distribution(generator);}; RowVectorXi v = RowVectorXi::NullaryExpr(10, poisson ); std::cout << v << "\n"; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/doc/tutorial.cpp ================================================ #include int main(int argc, char *argv[]) { std::cout.precision(2); // demo static functions Eigen::Matrix3f m3 = Eigen::Matrix3f::Random(); Eigen::Matrix4f m4 = Eigen::Matrix4f::Identity(); std::cout << "*** Step 1 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; // demo non-static set... 
functions m4.setZero(); m3.diagonal().setOnes(); std::cout << "*** Step 2 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; // demo fixed-size block() expression as lvalue and as rvalue m4.block<3,3>(0,1) = m3; m3.row(2) = m4.block<1,3>(2,0); std::cout << "*** Step 3 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; // demo dynamic-size block() { int rows = 3, cols = 3; m4.block(0,1,3,3).setIdentity(); std::cout << "*** Step 4 ***\nm4:\n" << m4 << std::endl; } // demo vector blocks m4.diagonal().block(1,2).setOnes(); std::cout << "*** Step 5 ***\nm4.diagonal():\n" << m4.diagonal() << std::endl; std::cout << "m4.diagonal().start(3)\n" << m4.diagonal().start(3) << std::endl; // demo coeff-wise operations m4 = m4.cwise()*m4; m3 = m3.cwise().cos(); std::cout << "*** Step 6 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; // sums of coefficients std::cout << "*** Step 7 ***\n m4.sum(): " << m4.sum() << std::endl; std::cout << "m4.col(2).sum(): " << m4.col(2).sum() << std::endl; std::cout << "m4.colwise().sum():\n" << m4.colwise().sum() << std::endl; std::cout << "m4.rowwise().sum():\n" << m4.rowwise().sum() << std::endl; // demo intelligent auto-evaluation m4 = m4 * m4; // auto-evaluates so no aliasing problem (performance penalty is low) Eigen::Matrix4f other = (m4 * m4).lazy(); // forces lazy evaluation m4 = m4 + m4; // here Eigen goes for lazy evaluation, as with most expressions m4 = -m4 + m4 + 5 * m4; // same here, Eigen chooses lazy evaluation for all that. m4 = m4 * (m4 + m4); // here Eigen chooses to first evaluate m4 + m4 into a temporary. // indeed, here it is an optimization to cache this intermediate result. m3 = m3 * m4.block<3,3>(1,1); // here Eigen chooses NOT to evaluate block() into a temporary // because accessing coefficients of that block expression is not more costly than accessing // coefficients of a plain matrix. m4 = m4 * m4.transpose(); // same here, lazy evaluation of the transpose. 
m4 = m4 * m4.transpose().eval(); // forces immediate evaluation of the transpose std::cout << "*** Step 8 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/eigen3.pc.in ================================================ prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} Name: Eigen3 Description: A C++ template library for linear algebra: vectors, matrices, and related algorithms Requires: Version: @EIGEN_VERSION_NUMBER@ Libs: Cflags: -I${prefix}/@INCLUDE_INSTALL_DIR@ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/bdcsvd_int.cpp ================================================ #include "../Eigen/SVD" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { BDCSVD > qr(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/block_nonconst_ctor_on_const_xpr_0.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Block b(m,0,0); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/block_nonconst_ctor_on_const_xpr_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Block b(m,0,0,3,3); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/block_nonconst_ctor_on_const_xpr_2.cpp ================================================ #include "../Eigen/Core" 
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ // row/column constructor Block b(m,0); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/block_on_const_type_actually_const_0.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ Matrix3f m; Block(m, 0, 0, 3, 3).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/block_on_const_type_actually_const_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ MatrixXf m; Block(m, 0, 0).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/colpivqr_int.cpp ================================================ #include "../Eigen/QR" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { ColPivHouseholderQR > qr(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/const_qualified_block_method_retval_0.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Block b(m.block<3,3>(0,0)); } int main() {} ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/failtest/const_qualified_block_method_retval_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Block b(m.block(0,0,3,3)); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/const_qualified_diagonal_method_retval.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Diagonal b(m.diagonal()); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/const_qualified_transpose_method_retval.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Transpose b(m.transpose()); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/cwiseunaryview_nonconst_ctor_on_const_xpr.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ CwiseUnaryView,Matrix3d> t(m); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/cwiseunaryview_on_const_type_actually_const.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using 
namespace Eigen; void foo(){ MatrixXf m; CwiseUnaryView,CV_QUALIFIER MatrixXf>(m).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/diagonal_nonconst_ctor_on_const_xpr.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Diagonal d(m); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/diagonal_on_const_type_actually_const.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ MatrixXf m; Diagonal(m).coeffRef(0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/eigensolver_cplx.cpp ================================================ #include "../Eigen/Eigenvalues" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR std::complex #else #define SCALAR float #endif using namespace Eigen; int main() { EigenSolver > eig(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/eigensolver_int.cpp ================================================ #include "../Eigen/Eigenvalues" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { EigenSolver > eig(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/failtest_sanity_check.cpp ================================================ #ifdef EIGEN_SHOULD_FAIL_TO_BUILD This is just some text that won't compile as a C++ 
file, as a basic sanity check for failtest. #else int main() {} #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/fullpivlu_int.cpp ================================================ #include "../Eigen/LU" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { FullPivLU > lu(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/fullpivqr_int.cpp ================================================ #include "../Eigen/QR" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { FullPivHouseholderQR > qr(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/initializer_list_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define ROWS Dynamic #else #define ROWS 3 #endif using namespace Eigen; int main() { Matrix {1, 2, 3}; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/initializer_list_2.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define ROWS Dynamic #define COLS Dynamic #else #define ROWS 3 #define COLS 1 #endif using namespace Eigen; int main() { Matrix {1, 2, 3}; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/jacobisvd_int.cpp ================================================ #include "../Eigen/SVD" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { JacobiSVD > qr(Matrix::Random(10,10)); } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/failtest/ldlt_int.cpp ================================================ #include "../Eigen/Cholesky" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { LDLT > ldlt(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/llt_int.cpp ================================================ #include "../Eigen/Cholesky" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { LLT > llt(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_nonconst_ctor_on_const_ptr_0.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER float *ptr){ Map m(ptr); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_nonconst_ctor_on_const_ptr_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER float *ptr, DenseIndex size){ Map m(ptr, size); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_nonconst_ctor_on_const_ptr_2.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER float *ptr, DenseIndex rows, DenseIndex cols){ Map m(ptr, rows, cols); } int main() {} 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_nonconst_ctor_on_const_ptr_3.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER float *ptr, DenseIndex rows, DenseIndex cols){ Map > m(ptr, rows, cols, InnerStride<2>()); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_nonconst_ctor_on_const_ptr_4.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER #else #define CV_QUALIFIER const #endif using namespace Eigen; void foo(const float *ptr, DenseIndex rows, DenseIndex cols){ Map > m(ptr, rows, cols, OuterStride<>(2)); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_on_const_type_actually_const_0.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(float *ptr){ Map(ptr, 1, 1).coeffRef(0,0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/map_on_const_type_actually_const_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(float *ptr){ Map(ptr).coeffRef(0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/partialpivlu_int.cpp ================================================ #include "../Eigen/LU" #ifdef 
EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { PartialPivLU > lu(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/qr_int.cpp ================================================ #include "../Eigen/QR" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define SCALAR int #else #define SCALAR float #endif using namespace Eigen; int main() { HouseholderQR > qr(Matrix::Random(10,10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ref_1.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void call_ref(Ref a) { } int main() { VectorXf a(10); CV_QUALIFIER VectorXf& ac(a); call_ref(ac); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ref_2.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; void call_ref(Ref a) { } int main() { MatrixXf A(10,10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD call_ref(A.row(3)); #else call_ref(A.col(3)); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ref_3.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; #ifdef EIGEN_SHOULD_FAIL_TO_BUILD void call_ref(Ref a) { } #else void call_ref(const Ref &a) { } #endif int main() { VectorXf a(10); call_ref(a+a); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ref_4.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; void call_ref(Ref > a) {} int main() { MatrixXf A(10,10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD 
call_ref(A.transpose()); #else call_ref(A); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ref_5.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; void call_ref(Ref a) { } int main() { VectorXf a(10); DenseBase &ac(a); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD call_ref(ac); #else call_ref(ac.derived()); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/selfadjointview_nonconst_ctor_on_const_xpr.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ SelfAdjointView t(m); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/selfadjointview_on_const_type_actually_const.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ MatrixXf m; SelfAdjointView(m).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_ref_1.cpp ================================================ #include "../Eigen/Sparse" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void call_ref(Ref > a) { } int main() { SparseMatrix a(10,10); CV_QUALIFIER SparseMatrix& ac(a); call_ref(ac); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_ref_2.cpp ================================================ #include "../Eigen/Sparse" using namespace Eigen; void call_ref(Ref > a) { } int main() { 
SparseMatrix A(10,10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD call_ref(A.row(3)); #else call_ref(A.col(3)); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_ref_3.cpp ================================================ #include "../Eigen/Sparse" using namespace Eigen; #ifdef EIGEN_SHOULD_FAIL_TO_BUILD void call_ref(Ref > a) { } #else void call_ref(const Ref > &a) { } #endif int main() { SparseMatrix a(10,10); call_ref(a+a); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_ref_4.cpp ================================================ #include "../Eigen/Sparse" using namespace Eigen; void call_ref(Ref > a) {} int main() { SparseMatrix A(10,10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD call_ref(A.transpose()); #else call_ref(A); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_ref_5.cpp ================================================ #include "../Eigen/Sparse" using namespace Eigen; void call_ref(Ref > a) { } int main() { SparseMatrix a(10,10); SparseMatrixBase > &ac(a); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD call_ref(ac); #else call_ref(ac.derived()); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/sparse_storage_mismatch.cpp ================================================ #include "../Eigen/Sparse" using namespace Eigen; typedef SparseMatrix Mat1; #ifdef EIGEN_SHOULD_FAIL_TO_BUILD typedef SparseMatrix Mat2; #else typedef SparseMatrix Mat2; #endif int main() { Mat1 a(10,10); Mat2 b(10,10); a += b; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/swap_1.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; int main() { VectorXf a(10), b(10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD 
const DenseBase &ac(a); #else DenseBase &ac(a); #endif b.swap(ac); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/swap_2.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; int main() { VectorXf a(10), b(10); VectorXf const &ac(a); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD b.swap(ac); #else b.swap(ac.const_cast_derived()); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ternary_1.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; int main(int argc,char **) { VectorXf a(10), b(10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD b = argc>1 ? 2*a : -a; #else b = argc>1 ? 2*a : VectorXf(-a); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/ternary_2.cpp ================================================ #include "../Eigen/Core" using namespace Eigen; int main(int argc,char **) { VectorXf a(10), b(10); #ifdef EIGEN_SHOULD_FAIL_TO_BUILD b = argc>1 ? 2*a : a+a; #else b = argc>1 ? 
VectorXf(2*a) : VectorXf(a+a); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/transpose_nonconst_ctor_on_const_xpr.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ Transpose t(m); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/transpose_on_const_type_actually_const.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ MatrixXf m; Transpose(m).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/triangularview_nonconst_ctor_on_const_xpr.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(CV_QUALIFIER Matrix3d &m){ TriangularView t(m); } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/failtest/triangularview_on_const_type_actually_const.cpp ================================================ #include "../Eigen/Core" #ifdef EIGEN_SHOULD_FAIL_TO_BUILD #define CV_QUALIFIER const #else #define CV_QUALIFIER #endif using namespace Eigen; void foo(){ MatrixXf m; TriangularView(m).coeffRef(0, 0) = 1.0f; } int main() {} ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/cholesky.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2010-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "lapack_common.h" #include // POTRF computes the Cholesky factorization of a real symmetric positive definite matrix A. EIGEN_LAPACK_FUNC(potrf,(char* uplo, int *n, RealScalar *pa, int *lda, int *info)) { *info = 0; if(UPLO(*uplo)==INVALID) *info = -1; else if(*n<0) *info = -2; else if(*lda(pa); MatrixType A(a,*n,*n,*lda); int ret; if(UPLO(*uplo)==UP) ret = int(internal::llt_inplace::blocked(A)); else ret = int(internal::llt_inplace::blocked(A)); if(ret>=0) *info = ret+1; return 0; } // POTRS solves a system of linear equations A*X = B with a symmetric // positive definite matrix A using the Cholesky factorization // A = U**T*U or A = L*L**T computed by DPOTRF. EIGEN_LAPACK_FUNC(potrs,(char* uplo, int *n, int *nrhs, RealScalar *pa, int *lda, RealScalar *pb, int *ldb, int *info)) { *info = 0; if(UPLO(*uplo)==INVALID) *info = -1; else if(*n<0) *info = -2; else if(*nrhs<0) *info = -3; else if(*lda(pa); Scalar* b = reinterpret_cast(pb); MatrixType A(a,*n,*n,*lda); MatrixType B(b,*n,*nrhs,*ldb); if(UPLO(*uplo)==UP) { A.triangularView().adjoint().solveInPlace(B); A.triangularView().solveInPlace(B); } else { A.triangularView().solveInPlace(B); A.triangularView().adjoint().solveInPlace(B); } return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/clacgv.f ================================================ *> \brief \b CLACGV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLACGV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE CLACGV( N, X, INCX ) * * .. Scalar Arguments .. 
* INTEGER INCX, N * .. * .. Array Arguments .. * COMPLEX X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLACGV conjugates a complex vector of length N. *> \endverbatim * * Arguments: * ========== * *> \param[in] N *> \verbatim *> N is INTEGER *> The length of the vector X. N >= 0. *> \endverbatim *> *> \param[in,out] X *> \verbatim *> X is COMPLEX array, dimension *> (1+(N-1)*abs(INCX)) *> On entry, the vector of length N to be conjugated. *> On exit, X is overwritten with conjg(X). *> \endverbatim *> *> \param[in] INCX *> \verbatim *> INCX is INTEGER *> The spacing between successive elements of X. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * * ===================================================================== SUBROUTINE CLACGV( N, X, INCX ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER INCX, N * .. * .. Array Arguments .. COMPLEX X( * ) * .. * * ===================================================================== * * .. Local Scalars .. INTEGER I, IOFF * .. * .. Intrinsic Functions .. INTRINSIC CONJG * .. * .. Executable Statements .. 
* IF( INCX.EQ.1 ) THEN DO 10 I = 1, N X( I ) = CONJG( X( I ) ) 10 CONTINUE ELSE IOFF = 1 IF( INCX.LT.0 ) $ IOFF = 1 - ( N-1 )*INCX DO 20 I = 1, N X( IOFF ) = CONJG( X( IOFF ) ) IOFF = IOFF + INCX 20 CONTINUE END IF RETURN * * End of CLACGV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/cladiv.f ================================================ *> \brief \b CLADIV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLADIV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * COMPLEX FUNCTION CLADIV( X, Y ) * * .. Scalar Arguments .. * COMPLEX X, Y * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLADIV := X / Y, where X and Y are complex. The computation of X / Y *> will not overflow on an intermediary step unless the results *> overflows. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is COMPLEX *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is COMPLEX *> The complex scalars X and Y. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * * ===================================================================== COMPLEX FUNCTION CLADIV( X, Y ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. COMPLEX X, Y * .. * * ===================================================================== * * .. Local Scalars .. REAL ZI, ZR * .. * .. External Subroutines .. EXTERNAL SLADIV * .. * .. Intrinsic Functions .. INTRINSIC AIMAG, CMPLX, REAL * .. * .. 
Executable Statements .. * CALL SLADIV( REAL( X ), AIMAG( X ), REAL( Y ), AIMAG( Y ), ZR, $ ZI ) CLADIV = CMPLX( ZR, ZI ) * RETURN * * End of CLADIV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/clarf.f ================================================ *> \brief \b CLARF * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLARF + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE CLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * .. Scalar Arguments .. * CHARACTER SIDE * INTEGER INCV, LDC, M, N * COMPLEX TAU * .. * .. Array Arguments .. * COMPLEX C( LDC, * ), V( * ), WORK( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLARF applies a complex elementary reflector H to a complex M-by-N *> matrix C, from either the left or the right. H is represented in the *> form *> *> H = I - tau * v * v**H *> *> where tau is a complex scalar and v is a complex vector. *> *> If tau = 0, then H is taken to be the unit matrix. *> *> To apply H**H (the conjugate transpose of H), supply conjg(tau) instead *> tau. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': form H * C *> = 'R': form C * H *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX array, dimension *> (1 + (M-1)*abs(INCV)) if SIDE = 'L' *> or (1 + (N-1)*abs(INCV)) if SIDE = 'R' *> The vector v in the representation of H. V is not used if *> TAU = 0. *> \endverbatim *> *> \param[in] INCV *> \verbatim *> INCV is INTEGER *> The increment between elements of v. INCV <> 0. 
*> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is COMPLEX *> The value tau in the representation of H. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is COMPLEX array, dimension (LDC,N) *> On entry, the M-by-N matrix C. *> On exit, C is overwritten by the matrix H * C if SIDE = 'L', *> or C * H if SIDE = 'R'. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension *> (N) if SIDE = 'L' *> or (M) if SIDE = 'R' *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * * ===================================================================== SUBROUTINE CLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER SIDE INTEGER INCV, LDC, M, N COMPLEX TAU * .. * .. Array Arguments .. COMPLEX C( LDC, * ), V( * ), WORK( * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX ONE, ZERO PARAMETER ( ONE = ( 1.0E+0, 0.0E+0 ), $ ZERO = ( 0.0E+0, 0.0E+0 ) ) * .. * .. Local Scalars .. LOGICAL APPLYLEFT INTEGER I, LASTV, LASTC * .. * .. External Subroutines .. EXTERNAL CGEMV, CGERC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILACLR, ILACLC EXTERNAL LSAME, ILACLR, ILACLC * .. * .. Executable Statements .. * APPLYLEFT = LSAME( SIDE, 'L' ) LASTV = 0 LASTC = 0 IF( TAU.NE.ZERO ) THEN ! Set up variables for scanning V. LASTV begins pointing to the end ! of V. 
IF( APPLYLEFT ) THEN LASTV = M ELSE LASTV = N END IF IF( INCV.GT.0 ) THEN I = 1 + (LASTV-1) * INCV ELSE I = 1 END IF ! Look for the last non-zero row in V. DO WHILE( LASTV.GT.0 .AND. V( I ).EQ.ZERO ) LASTV = LASTV - 1 I = I - INCV END DO IF( APPLYLEFT ) THEN ! Scan for the last non-zero column in C(1:lastv,:). LASTC = ILACLC(LASTV, N, C, LDC) ELSE ! Scan for the last non-zero row in C(:,1:lastv). LASTC = ILACLR(M, LASTV, C, LDC) END IF END IF ! Note that lastc.eq.0 renders the BLAS operations null; no special ! case is needed at this level. IF( APPLYLEFT ) THEN * * Form H * C * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastv,1:lastc)**H * v(1:lastv,1) * CALL CGEMV( 'Conjugate transpose', LASTV, LASTC, ONE, $ C, LDC, V, INCV, ZERO, WORK, 1 ) * * C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)**H * CALL CGERC( LASTV, LASTC, -TAU, V, INCV, WORK, 1, C, LDC ) END IF ELSE * * Form C * H * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) * CALL CGEMV( 'No transpose', LASTC, LASTV, ONE, C, LDC, $ V, INCV, ZERO, WORK, 1 ) * * C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)**H * CALL CGERC( LASTC, LASTV, -TAU, WORK, 1, V, INCV, C, LDC ) END IF END IF RETURN * * End of CLARF * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/clarfb.f ================================================ *> \brief \b CLARFB * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLARFB + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE CLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, * T, LDT, C, LDC, WORK, LDWORK ) * * .. Scalar Arguments .. * CHARACTER DIRECT, SIDE, STOREV, TRANS * INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. 
* COMPLEX C( LDC, * ), T( LDT, * ), V( LDV, * ), * $ WORK( LDWORK, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLARFB applies a complex block reflector H or its transpose H**H to a *> complex M-by-N matrix C, from either the left or the right. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': apply H or H**H from the Left *> = 'R': apply H or H**H from the Right *> \endverbatim *> *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 *> = 'N': apply H (No transpose) *> = 'C': apply H**H (Conjugate transpose) *> \endverbatim *> *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Indicates how H is formed from a product of elementary *> reflectors *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Indicates how the vectors which define the elementary *> reflectors are stored: *> = 'C': Columnwise *> = 'R': Rowwise *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,M) if STOREV = 'R' and SIDE = 'L' *> (LDV,N) if STOREV = 'R' and SIDE = 'R' *> The matrix V. See Further Details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); *> if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); *> if STOREV = 'R', LDV >= K. 
*> \endverbatim *> *> \param[in] T *> \verbatim *> T is COMPLEX array, dimension (LDT,K) *> The triangular K-by-K matrix T in the representation of the *> block reflector. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is COMPLEX array, dimension (LDC,N) *> On entry, the M-by-N matrix C. *> On exit, C is overwritten by H*C or H**H*C or C*H or C*H**H. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is COMPLEX array, dimension (LDWORK,K) *> \endverbatim *> *> \param[in] LDWORK *> \verbatim *> LDWORK is INTEGER *> The leading dimension of the array WORK. *> If SIDE = 'L', LDWORK >= max(1,N); *> if SIDE = 'R', LDWORK >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored; the corresponding *> array elements are modified but restored on exit. The rest of the *> array is not used. 
*> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE CLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, $ T, LDT, C, LDC, WORK, LDWORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER DIRECT, SIDE, STOREV, TRANS INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. COMPLEX C( LDC, * ), T( LDT, * ), V( LDV, * ), $ WORK( LDWORK, * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX ONE PARAMETER ( ONE = ( 1.0E+0, 0.0E+0 ) ) * .. * .. Local Scalars .. CHARACTER TRANST INTEGER I, J, LASTV, LASTC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILACLR, ILACLC EXTERNAL LSAME, ILACLR, ILACLC * .. * .. External Subroutines .. EXTERNAL CCOPY, CGEMM, CLACGV, CTRMM * .. * .. Intrinsic Functions .. INTRINSIC CONJG * .. * .. Executable Statements .. * * Quick return if possible * IF( M.LE.0 .OR. N.LE.0 ) $ RETURN * IF( LSAME( TRANS, 'N' ) ) THEN TRANST = 'C' ELSE TRANST = 'N' END IF * IF( LSAME( STOREV, 'C' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 ) (first K rows) * ( V2 ) * where V1 is unit lower triangular. 
* IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILACLR( M, K, V, LDV ) ) LASTC = ILACLC( LASTV, N, C, LDC ) * * W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK) * * W := C1**H * DO 10 J = 1, K CALL CCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) CALL CLACGV( LASTC, WORK( 1, J ), 1 ) 10 CONTINUE * * W := W * V1 * CALL CTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**H *V2 * CALL CGEMM( 'Conjugate transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C( K+1, 1 ), LDC, $ V( K+1, 1 ), LDV, ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL CTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**H * IF( M.GT.K ) THEN * * C2 := C2 - V2 * W**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTV-K, LASTC, K, -ONE, V( K+1, 1 ), LDV, $ WORK, LDWORK, ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1**H * CALL CTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**H * DO 30 J = 1, K DO 20 I = 1, LASTC C( J, I ) = C( J, I ) - CONJG( WORK( I, J ) ) 20 CONTINUE 30 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILACLR( N, K, V, LDV ) ) LASTC = ILACLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C1 * DO 40 J = 1, K CALL CCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 40 CONTINUE * * W := W * V1 * CALL CTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2 * CALL CGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL CTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**H * IF( LASTV.GT.K ) 
THEN * * C2 := C2 - W * V2**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( K+1, 1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1**H * CALL CTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 60 J = 1, K DO 50 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 50 CONTINUE 60 CONTINUE END IF * ELSE * * Let V = ( V1 ) * ( V2 ) (last K rows) * where V2 is unit upper triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILACLR( M, K, V, LDV ) ) LASTC = ILACLC( LASTV, N, C, LDC ) * * W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK) * * W := C2**H * DO 70 J = 1, K CALL CCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) CALL CLACGV( LASTC, WORK( 1, J ), 1 ) 70 CONTINUE * * W := W * V2 * CALL CTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**H*V1 * CALL CGEMM( 'Conjugate transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL CTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1 * W**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTV-K, LASTC, K, -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2**H * CALL CTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**H * DO 90 J = 1, K DO 80 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - $ CONJG( WORK( I, J ) ) 80 CONTINUE 90 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILACLR( N, K, V, LDV ) ) LASTC = ILACLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C2 * 
DO 100 J = 1, K CALL CCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 100 CONTINUE * * W := W * V2 * CALL CTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1 * CALL CGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C, LDC, V, LDV, ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL CTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2**H * CALL CTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W * DO 120 J = 1, K DO 110 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) $ - WORK( I, J ) 110 CONTINUE 120 CONTINUE END IF END IF * ELSE IF( LSAME( STOREV, 'R' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 V2 ) (V1: first K columns) * where V1 is unit upper triangular. 
* IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILACLC( K, M, V, LDV ) ) LASTC = ILACLC( LASTV, N, C, LDC ) * * W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK) * * W := C1**H * DO 130 J = 1, K CALL CCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) CALL CLACGV( LASTC, WORK( 1, J ), 1 ) 130 CONTINUE * * W := W * V1**H * CALL CTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**H*V2**H * CALL CGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL CTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**H * W**H * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2**H * W**H * CALL CGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTV-K, LASTC, K, $ -ONE, V( 1, K+1 ), LDV, WORK, LDWORK, $ ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1 * CALL CTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**H * DO 150 J = 1, K DO 140 I = 1, LASTC C( J, I ) = C( J, I ) - CONJG( WORK( I, J ) ) 140 CONTINUE 150 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILACLC( K, N, V, LDV ) ) LASTC = ILACLR( M, LASTV, C, LDC ) * * W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK) * * W := C1 * DO 160 J = 1, K CALL CCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 160 CONTINUE * * W := W * V1**H * CALL CTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, K, LASTV-K, ONE, C( 1, K+1 ), LDC, $ V( 1, K+1 ), LDV, ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL CTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, 
K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2 * CALL CGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( 1, K+1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1 * CALL CTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 180 J = 1, K DO 170 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 170 CONTINUE 180 CONTINUE * END IF * ELSE * * Let V = ( V1 V2 ) (V2: last K columns) * where V2 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILACLC( K, M, V, LDV ) ) LASTC = ILACLC( LASTV, N, C, LDC ) * * W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK) * * W := C2**H * DO 190 J = 1, K CALL CCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) CALL CLACGV( LASTC, WORK( 1, J ), 1 ) 190 CONTINUE * * W := W * V2**H * CALL CTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**H * V1**H * CALL CGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTC, K, LASTV-K, $ ONE, C, LDC, V, LDV, ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL CTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**H * W**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1**H * W**H * CALL CGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTV-K, LASTC, K, $ -ONE, V, LDV, WORK, LDWORK, ONE, C, LDC ) END IF * * W := W * V2 * CALL CTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**H * DO 210 J = 1, K DO 200 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - $ CONJG( WORK( I, J ) ) 200 CONTINUE 210 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILACLC( K, N, V, LDV ) ) 
LASTC = ILACLR( M, LASTV, C, LDC ) * * W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK) * * W := C2 * DO 220 J = 1, K CALL CCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 220 CONTINUE * * W := W * V2**H * CALL CTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1**H * CALL CGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, ONE, $ WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL CTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1 * CALL CGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL CTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C1 := C1 - W * DO 240 J = 1, K DO 230 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) $ - WORK( I, J ) 230 CONTINUE 240 CONTINUE * END IF * END IF END IF * RETURN * * End of CLARFB * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/clarfg.f ================================================ *> \brief \b CLARFG * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLARFG + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE CLARFG( N, ALPHA, X, INCX, TAU ) * * .. Scalar Arguments .. * INTEGER INCX, N * COMPLEX ALPHA, TAU * .. * .. Array Arguments .. * COMPLEX X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLARFG generates a complex elementary reflector H of order n, such *> that *> *> H**H * ( alpha ) = ( beta ), H**H * H = I. 
*> ( x ) ( 0 ) *> *> where alpha and beta are scalars, with beta real, and x is an *> (n-1)-element complex vector. H is represented in the form *> *> H = I - tau * ( 1 ) * ( 1 v**H ) , *> ( v ) *> *> where tau is a complex scalar and v is a complex (n-1)-element *> vector. Note that H is not hermitian. *> *> If the elements of x are all zero and alpha is real, then tau = 0 *> and H is taken to be the unit matrix. *> *> Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . *> \endverbatim * * Arguments: * ========== * *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the elementary reflector. *> \endverbatim *> *> \param[in,out] ALPHA *> \verbatim *> ALPHA is COMPLEX *> On entry, the value alpha. *> On exit, it is overwritten with the value beta. *> \endverbatim *> *> \param[in,out] X *> \verbatim *> X is COMPLEX array, dimension *> (1+(N-2)*abs(INCX)) *> On entry, the vector x. *> On exit, it is overwritten with the vector v. *> \endverbatim *> *> \param[in] INCX *> \verbatim *> INCX is INTEGER *> The increment between elements of X. INCX > 0. *> \endverbatim *> *> \param[out] TAU *> \verbatim *> TAU is COMPLEX *> The value tau. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * * ===================================================================== SUBROUTINE CLARFG( N, ALPHA, X, INCX, TAU ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER INCX, N COMPLEX ALPHA, TAU * .. * .. Array Arguments .. COMPLEX X( * ) * .. * * ===================================================================== * * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. 
Local Scalars .. INTEGER J, KNT REAL ALPHI, ALPHR, BETA, RSAFMN, SAFMIN, XNORM * .. * .. External Functions .. REAL SCNRM2, SLAMCH, SLAPY3 COMPLEX CLADIV EXTERNAL SCNRM2, SLAMCH, SLAPY3, CLADIV * .. * .. Intrinsic Functions .. INTRINSIC ABS, AIMAG, CMPLX, REAL, SIGN * .. * .. External Subroutines .. EXTERNAL CSCAL, CSSCAL * .. * .. Executable Statements .. * IF( N.LE.0 ) THEN TAU = ZERO RETURN END IF * XNORM = SCNRM2( N-1, X, INCX ) ALPHR = REAL( ALPHA ) ALPHI = AIMAG( ALPHA ) * IF( XNORM.EQ.ZERO .AND. ALPHI.EQ.ZERO ) THEN * * H = I * TAU = ZERO ELSE * * general case * BETA = -SIGN( SLAPY3( ALPHR, ALPHI, XNORM ), ALPHR ) SAFMIN = SLAMCH( 'S' ) / SLAMCH( 'E' ) RSAFMN = ONE / SAFMIN * KNT = 0 IF( ABS( BETA ).LT.SAFMIN ) THEN * * XNORM, BETA may be inaccurate; scale X and recompute them * 10 CONTINUE KNT = KNT + 1 CALL CSSCAL( N-1, RSAFMN, X, INCX ) BETA = BETA*RSAFMN ALPHI = ALPHI*RSAFMN ALPHR = ALPHR*RSAFMN IF( ABS( BETA ).LT.SAFMIN ) $ GO TO 10 * * New BETA is at most 1, at least SAFMIN * XNORM = SCNRM2( N-1, X, INCX ) ALPHA = CMPLX( ALPHR, ALPHI ) BETA = -SIGN( SLAPY3( ALPHR, ALPHI, XNORM ), ALPHR ) END IF TAU = CMPLX( ( BETA-ALPHR ) / BETA, -ALPHI / BETA ) ALPHA = CLADIV( CMPLX( ONE ), ALPHA-BETA ) CALL CSCAL( N-1, ALPHA, X, INCX ) * * If ALPHA is subnormal, it may lose relative accuracy * DO 20 J = 1, KNT BETA = BETA*SAFMIN 20 CONTINUE ALPHA = BETA END IF * RETURN * * End of CLARFG * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/clarft.f ================================================ *> \brief \b CLARFT * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download CLARFT + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. 
* CHARACTER DIRECT, STOREV * INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. * COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> CLARFT forms the triangular factor T of a complex block reflector H *> of order n, which is defined as a product of k elementary reflectors. *> *> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; *> *> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. *> *> If STOREV = 'C', the vector which defines the elementary reflector *> H(i) is stored in the i-th column of the array V, and *> *> H = I - V * T * V**H *> *> If STOREV = 'R', the vector which defines the elementary reflector *> H(i) is stored in the i-th row of the array V, and *> *> H = I - V**H * T * V *> \endverbatim * * Arguments: * ========== * *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Specifies the order in which the elementary reflectors are *> multiplied to form the block reflector: *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Specifies how the vectors which define the elementary *> reflectors are stored (see also Further Details): *> = 'C': columnwise *> = 'R': rowwise *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the block reflector H. N >= 0. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the triangular factor T (= the number of *> elementary reflectors). K >= 1. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,N) if STOREV = 'R' *> The matrix V. See further details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. 
*> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is COMPLEX array, dimension (K) *> TAU(i) must contain the scalar factor of the elementary *> reflector H(i). *> \endverbatim *> *> \param[out] T *> \verbatim *> T is COMPLEX array, dimension (LDT,K) *> The k by k triangular factor T of the block reflector. *> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is *> lower triangular. The rest of the array is not used. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date April 2012 * *> \ingroup complexOTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * -- LAPACK auxiliary routine (version 3.4.1) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * .. Scalar Arguments .. CHARACTER DIRECT, STOREV INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) * .. 
* * ===================================================================== * * .. Parameters .. COMPLEX ONE, ZERO PARAMETER ( ONE = ( 1.0E+0, 0.0E+0 ), $ ZERO = ( 0.0E+0, 0.0E+0 ) ) * .. * .. Local Scalars .. INTEGER I, J, PREVLASTV, LASTV * .. * .. External Subroutines .. EXTERNAL CGEMV, CLACGV, CTRMV * .. * .. External Functions .. LOGICAL LSAME EXTERNAL LSAME * .. * .. Executable Statements .. * * Quick return if possible * IF( N.EQ.0 ) $ RETURN * IF( LSAME( DIRECT, 'F' ) ) THEN PREVLASTV = N DO I = 1, K PREVLASTV = MAX( PREVLASTV, I ) IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = 1, I T( J, I ) = ZERO END DO ELSE * * general case * IF( LSAME( STOREV, 'C' ) ) THEN * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) * CALL CGEMV( 'Conjugate transpose', J-I, I-1, $ -TAU( I ), V( I+1, 1 ), LDV, $ V( I+1, I ), 1, $ ONE, T( 1, I ), 1 ) ELSE * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * V( J , I ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H * CALL CGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, $ ONE, T( 1, I ), LDT ) END IF * * T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) * CALL CTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, $ LDT, T( 1, I ), 1 ) T( I, I ) = TAU( I ) IF( I.GT.1 ) THEN PREVLASTV = MAX( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF END DO ELSE PREVLASTV = 1 DO I = K, 1, -1 IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = I, K T( J, I ) = ZERO END DO ELSE * * general case * IF( I.LT.K ) THEN IF( LSAME( STOREV, 'C' ) ) THEN * Skip any leading zeros. 
DO LASTV = 1, I-1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) * CALL CGEMV( 'Conjugate transpose', N-K+I-J, K-I, $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), $ 1, ONE, T( I+1, I ), 1 ) ELSE * Skip any leading zeros. DO LASTV = 1, I-1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * V( J, N-K+I ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H * CALL CGEMM( 'N', 'C', K-I, 1, N-K+I-J, -TAU( I ), $ V( I+1, J ), LDV, V( I, J ), LDV, $ ONE, T( I+1, I ), LDT ) END IF * * T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) * CALL CTRMV( 'Lower', 'No transpose', 'Non-unit', K-I, $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) IF( I.GT.1 ) THEN PREVLASTV = MIN( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF T( I, I ) = TAU( I ) END IF END DO END IF RETURN * * End of CLARFT * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/complex_double.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define SCALAR std::complex #define SCALAR_SUFFIX z #define SCALAR_SUFFIX_UP "Z" #define REAL_SCALAR_SUFFIX d #define ISCOMPLEX 1 #include "cholesky.cpp" #include "lu.cpp" #include "svd.cpp" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/complex_single.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define SCALAR std::complex #define SCALAR_SUFFIX c #define SCALAR_SUFFIX_UP "C" #define REAL_SCALAR_SUFFIX s #define ISCOMPLEX 1 #include "cholesky.cpp" #include "lu.cpp" #include "svd.cpp" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dladiv.f ================================================ *> \brief \b DLADIV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLADIV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE DLADIV( A, B, C, D, P, Q ) * * .. Scalar Arguments .. * DOUBLE PRECISION A, B, C, D, P, Q * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLADIV performs complex division in real arithmetic *> *> a + i*b *> p + i*q = --------- *> c + i*d *> *> The algorithm is due to Robert L. Smith and can be found *> in D. Knuth, The art of Computer Programming, Vol.2, p.195 *> \endverbatim * * Arguments: * ========== * *> \param[in] A *> \verbatim *> A is DOUBLE PRECISION *> \endverbatim *> *> \param[in] B *> \verbatim *> B is DOUBLE PRECISION *> \endverbatim *> *> \param[in] C *> \verbatim *> C is DOUBLE PRECISION *> \endverbatim *> *> \param[in] D *> \verbatim *> D is DOUBLE PRECISION *> The scalars a, b, c, and d in the above expression. *> \endverbatim *> *> \param[out] P *> \verbatim *> P is DOUBLE PRECISION *> \endverbatim *> *> \param[out] Q *> \verbatim *> Q is DOUBLE PRECISION *> The scalars p and q in the above expression. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. 
of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== SUBROUTINE DLADIV( A, B, C, D, P, Q ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. DOUBLE PRECISION A, B, C, D, P, Q * .. * * ===================================================================== * * .. Local Scalars .. DOUBLE PRECISION E, F * .. * .. Intrinsic Functions .. INTRINSIC ABS * .. * .. Executable Statements .. * IF( ABS( D ).LT.ABS( C ) ) THEN E = D / C F = C + D*E P = ( A+B*E ) / F Q = ( B-A*E ) / F ELSE E = C / D F = D + C*E P = ( B+A*E ) / F Q = ( -A+B*E ) / F END IF * RETURN * * End of DLADIV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlamch.f ================================================ *> \brief \b DLAMCH * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * * Definition: * =========== * * DOUBLE PRECISION FUNCTION DLAMCH( CMACH ) * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLAMCH determines double precision machine parameters. 
*> \endverbatim * * Arguments: * ========== * *> \param[in] CMACH *> \verbatim *> Specifies the value to be returned by DLAMCH: *> = 'E' or 'e', DLAMCH := eps *> = 'S' or 's , DLAMCH := sfmin *> = 'B' or 'b', DLAMCH := base *> = 'P' or 'p', DLAMCH := eps*base *> = 'N' or 'n', DLAMCH := t *> = 'R' or 'r', DLAMCH := rnd *> = 'M' or 'm', DLAMCH := emin *> = 'U' or 'u', DLAMCH := rmin *> = 'L' or 'l', DLAMCH := emax *> = 'O' or 'o', DLAMCH := rmax *> where *> eps = relative machine precision *> sfmin = safe minimum, such that 1/sfmin does not overflow *> base = base of the machine *> prec = eps*base *> t = number of (base) digits in the mantissa *> rnd = 1.0 when rounding occurs in addition, 0.0 otherwise *> emin = minimum exponent before (gradual) underflow *> rmin = underflow threshold - base**(emin-1) *> emax = largest exponent before overflow *> rmax = overflow threshold - (base**emax)*(1-eps) *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== DOUBLE PRECISION FUNCTION DLAMCH( CMACH ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER CMACH * .. * * ===================================================================== * * .. Parameters .. DOUBLE PRECISION ONE, ZERO PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) * .. * .. Local Scalars .. DOUBLE PRECISION RND, EPS, SFMIN, SMALL, RMACH * .. * .. External Functions .. LOGICAL LSAME EXTERNAL LSAME * .. * .. Intrinsic Functions .. INTRINSIC DIGITS, EPSILON, HUGE, MAXEXPONENT, $ MINEXPONENT, RADIX, TINY * .. * .. Executable Statements .. * * * Assume rounding, not chopping. Always. 
* RND = ONE * IF( ONE.EQ.RND ) THEN EPS = EPSILON(ZERO) * 0.5 ELSE EPS = EPSILON(ZERO) END IF * IF( LSAME( CMACH, 'E' ) ) THEN RMACH = EPS ELSE IF( LSAME( CMACH, 'S' ) ) THEN SFMIN = TINY(ZERO) SMALL = ONE / HUGE(ZERO) IF( SMALL.GE.SFMIN ) THEN * * Use SMALL plus a bit, to avoid the possibility of rounding * causing overflow when computing 1/sfmin. * SFMIN = SMALL*( ONE+EPS ) END IF RMACH = SFMIN ELSE IF( LSAME( CMACH, 'B' ) ) THEN RMACH = RADIX(ZERO) ELSE IF( LSAME( CMACH, 'P' ) ) THEN RMACH = EPS * RADIX(ZERO) ELSE IF( LSAME( CMACH, 'N' ) ) THEN RMACH = DIGITS(ZERO) ELSE IF( LSAME( CMACH, 'R' ) ) THEN RMACH = RND ELSE IF( LSAME( CMACH, 'M' ) ) THEN RMACH = MINEXPONENT(ZERO) ELSE IF( LSAME( CMACH, 'U' ) ) THEN RMACH = tiny(zero) ELSE IF( LSAME( CMACH, 'L' ) ) THEN RMACH = MAXEXPONENT(ZERO) ELSE IF( LSAME( CMACH, 'O' ) ) THEN RMACH = HUGE(ZERO) ELSE RMACH = ZERO END IF * DLAMCH = RMACH RETURN * * End of DLAMCH * END ************************************************************************ *> \brief \b DLAMC3 *> \details *> \b Purpose: *> \verbatim *> DLAMC3 is intended to force A and B to be stored prior to doing *> the addition of A and B , for use in situations where optimizers *> might hold one of these in a register. *> \endverbatim *> \author LAPACK is a software package provided by Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. *> \date November 2011 *> \ingroup auxOTHERauxiliary *> *> \param[in] A *> \verbatim *> A is a DOUBLE PRECISION *> \endverbatim *> *> \param[in] B *> \verbatim *> B is a DOUBLE PRECISION *> The values A and B. *> \endverbatim *> DOUBLE PRECISION FUNCTION DLAMC3( A, B ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. * November 2010 * * .. Scalar Arguments .. DOUBLE PRECISION A, B * .. * ===================================================================== * * .. Executable Statements .. 
* DLAMC3 = A + B * RETURN * * End of DLAMC3 * END * ************************************************************************ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlapy2.f ================================================ *> \brief \b DLAPY2 * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLAPY2 + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * DOUBLE PRECISION FUNCTION DLAPY2( X, Y ) * * .. Scalar Arguments .. * DOUBLE PRECISION X, Y * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary *> overflow. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is DOUBLE PRECISION *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is DOUBLE PRECISION *> X and Y specify the values x and y. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== DOUBLE PRECISION FUNCTION DLAPY2( X, Y ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. DOUBLE PRECISION X, Y * .. * * ===================================================================== * * .. Parameters .. DOUBLE PRECISION ZERO PARAMETER ( ZERO = 0.0D0 ) DOUBLE PRECISION ONE PARAMETER ( ONE = 1.0D0 ) * .. * .. Local Scalars .. DOUBLE PRECISION W, XABS, YABS, Z * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. * .. Executable Statements .. 
* XABS = ABS( X ) YABS = ABS( Y ) W = MAX( XABS, YABS ) Z = MIN( XABS, YABS ) IF( Z.EQ.ZERO ) THEN DLAPY2 = W ELSE DLAPY2 = W*SQRT( ONE+( Z / W )**2 ) END IF RETURN * * End of DLAPY2 * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlapy3.f ================================================ *> \brief \b DLAPY3 * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLAPY3 + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * DOUBLE PRECISION FUNCTION DLAPY3( X, Y, Z ) * * .. Scalar Arguments .. * DOUBLE PRECISION X, Y, Z * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause *> unnecessary overflow. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is DOUBLE PRECISION *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is DOUBLE PRECISION *> \endverbatim *> *> \param[in] Z *> \verbatim *> Z is DOUBLE PRECISION *> X, Y and Z specify the values x, y and z. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== DOUBLE PRECISION FUNCTION DLAPY3( X, Y, Z ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. DOUBLE PRECISION X, Y, Z * .. * * ===================================================================== * * .. Parameters .. DOUBLE PRECISION ZERO PARAMETER ( ZERO = 0.0D0 ) * .. * .. Local Scalars .. 
DOUBLE PRECISION W, XABS, YABS, ZABS * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, SQRT * .. * .. Executable Statements .. * XABS = ABS( X ) YABS = ABS( Y ) ZABS = ABS( Z ) W = MAX( XABS, YABS, ZABS ) IF( W.EQ.ZERO ) THEN * W can be zero for max(0,nan,0) * adding all three entries together will make sure * NaN will not disappear. DLAPY3 = XABS + YABS + ZABS ELSE DLAPY3 = W*SQRT( ( XABS / W )**2+( YABS / W )**2+ $ ( ZABS / W )**2 ) END IF RETURN * * End of DLAPY3 * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlarf.f ================================================ *> \brief \b DLARF * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLARF + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE DLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * .. Scalar Arguments .. * CHARACTER SIDE * INTEGER INCV, LDC, M, N * DOUBLE PRECISION TAU * .. * .. Array Arguments .. * DOUBLE PRECISION C( LDC, * ), V( * ), WORK( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLARF applies a real elementary reflector H to a real m by n matrix *> C, from either the left or the right. H is represented in the form *> *> H = I - tau * v * v**T *> *> where tau is a real scalar and v is a real vector. *> *> If tau = 0, then H is taken to be the unit matrix. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': form H * C *> = 'R': form C * H *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. 
*> \endverbatim *> *> \param[in] V *> \verbatim *> V is DOUBLE PRECISION array, dimension *> (1 + (M-1)*abs(INCV)) if SIDE = 'L' *> or (1 + (N-1)*abs(INCV)) if SIDE = 'R' *> The vector v in the representation of H. V is not used if *> TAU = 0. *> \endverbatim *> *> \param[in] INCV *> \verbatim *> INCV is INTEGER *> The increment between elements of v. INCV <> 0. *> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is DOUBLE PRECISION *> The value tau in the representation of H. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is DOUBLE PRECISION array, dimension (LDC,N) *> On entry, the m by n matrix C. *> On exit, C is overwritten by the matrix H * C if SIDE = 'L', *> or C * H if SIDE = 'R'. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension *> (N) if SIDE = 'L' *> or (M) if SIDE = 'R' *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup doubleOTHERauxiliary * * ===================================================================== SUBROUTINE DLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER SIDE INTEGER INCV, LDC, M, N DOUBLE PRECISION TAU * .. * .. Array Arguments .. DOUBLE PRECISION C( LDC, * ), V( * ), WORK( * ) * .. * * ===================================================================== * * .. Parameters .. DOUBLE PRECISION ONE, ZERO PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) * .. * .. Local Scalars .. LOGICAL APPLYLEFT INTEGER I, LASTV, LASTC * .. * .. External Subroutines .. 
EXTERNAL DGEMV, DGER * .. * .. External Functions .. LOGICAL LSAME INTEGER ILADLR, ILADLC EXTERNAL LSAME, ILADLR, ILADLC * .. * .. Executable Statements .. * APPLYLEFT = LSAME( SIDE, 'L' ) LASTV = 0 LASTC = 0 IF( TAU.NE.ZERO ) THEN ! Set up variables for scanning V. LASTV begins pointing to the end ! of V. IF( APPLYLEFT ) THEN LASTV = M ELSE LASTV = N END IF IF( INCV.GT.0 ) THEN I = 1 + (LASTV-1) * INCV ELSE I = 1 END IF ! Look for the last non-zero row in V. DO WHILE( LASTV.GT.0 .AND. V( I ).EQ.ZERO ) LASTV = LASTV - 1 I = I - INCV END DO IF( APPLYLEFT ) THEN ! Scan for the last non-zero column in C(1:lastv,:). LASTC = ILADLC(LASTV, N, C, LDC) ELSE ! Scan for the last non-zero row in C(:,1:lastv). LASTC = ILADLR(M, LASTV, C, LDC) END IF END IF ! Note that lastc.eq.0 renders the BLAS operations null; no special ! case is needed at this level. IF( APPLYLEFT ) THEN * * Form H * C * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastv,1:lastc)**T * v(1:lastv,1) * CALL DGEMV( 'Transpose', LASTV, LASTC, ONE, C, LDC, V, INCV, $ ZERO, WORK, 1 ) * * C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)**T * CALL DGER( LASTV, LASTC, -TAU, V, INCV, WORK, 1, C, LDC ) END IF ELSE * * Form C * H * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) * CALL DGEMV( 'No transpose', LASTC, LASTV, ONE, C, LDC, $ V, INCV, ZERO, WORK, 1 ) * * C(1:lastc,1:lastv) := C(...) 
- w(1:lastc,1) * v(1:lastv,1)**T * CALL DGER( LASTC, LASTV, -TAU, WORK, 1, V, INCV, C, LDC ) END IF END IF RETURN * * End of DLARF * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlarfb.f ================================================ *> \brief \b DLARFB * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLARFB + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE DLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, * T, LDT, C, LDC, WORK, LDWORK ) * * .. Scalar Arguments .. * CHARACTER DIRECT, SIDE, STOREV, TRANS * INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. * DOUBLE PRECISION C( LDC, * ), T( LDT, * ), V( LDV, * ), * $ WORK( LDWORK, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLARFB applies a real block reflector H or its transpose H**T to a *> real m by n matrix C, from either the left or the right. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': apply H or H**T from the Left *> = 'R': apply H or H**T from the Right *> \endverbatim *> *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 *> = 'N': apply H (No transpose) *> = 'T': apply H**T (Transpose) *> \endverbatim *> *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Indicates how H is formed from a product of elementary *> reflectors *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Indicates how the vectors which define the elementary *> reflectors are stored: *> = 'C': Columnwise *> = 'R': Rowwise *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. 
*> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). *> \endverbatim *> *> \param[in] V *> \verbatim *> V is DOUBLE PRECISION array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,M) if STOREV = 'R' and SIDE = 'L' *> (LDV,N) if STOREV = 'R' and SIDE = 'R' *> The matrix V. See Further Details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); *> if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); *> if STOREV = 'R', LDV >= K. *> \endverbatim *> *> \param[in] T *> \verbatim *> T is DOUBLE PRECISION array, dimension (LDT,K) *> The triangular k by k matrix T in the representation of the *> block reflector. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is DOUBLE PRECISION array, dimension (LDC,N) *> On entry, the m by n matrix C. *> On exit, C is overwritten by H*C or H**T*C or C*H or C*H**T. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is DOUBLE PRECISION array, dimension (LDWORK,K) *> \endverbatim *> *> \param[in] LDWORK *> \verbatim *> LDWORK is INTEGER *> The leading dimension of the array WORK. *> If SIDE = 'L', LDWORK >= max(1,N); *> if SIDE = 'R', LDWORK >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. 
* *> \date November 2011 * *> \ingroup doubleOTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored; the corresponding *> array elements are modified but restored on exit. The rest of the *> array is not used. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE DLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, $ T, LDT, C, LDC, WORK, LDWORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER DIRECT, SIDE, STOREV, TRANS INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. DOUBLE PRECISION C( LDC, * ), T( LDT, * ), V( LDV, * ), $ WORK( LDWORK, * ) * .. * * ===================================================================== * * .. Parameters .. DOUBLE PRECISION ONE PARAMETER ( ONE = 1.0D+0 ) * .. * .. Local Scalars .. CHARACTER TRANST INTEGER I, J, LASTV, LASTC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILADLR, ILADLC EXTERNAL LSAME, ILADLR, ILADLC * .. * .. External Subroutines .. EXTERNAL DCOPY, DGEMM, DTRMM * .. * .. Executable Statements .. * * Quick return if possible * IF( M.LE.0 .OR. 
N.LE.0 ) $ RETURN * IF( LSAME( TRANS, 'N' ) ) THEN TRANST = 'T' ELSE TRANST = 'N' END IF * IF( LSAME( STOREV, 'C' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 ) (first K rows) * ( V2 ) * where V1 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILADLR( M, K, V, LDV ) ) LASTC = ILADLC( LASTV, N, C, LDC ) * * W := C**T * V = (C1**T * V1 + C2**T * V2) (stored in WORK) * * W := C1**T * DO 10 J = 1, K CALL DCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) 10 CONTINUE * * W := W * V1 * CALL DTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**T *V2 * CALL DGEMM( 'Transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL DTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2 * W**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTV-K, LASTC, K, $ -ONE, V( K+1, 1 ), LDV, WORK, LDWORK, ONE, $ C( K+1, 1 ), LDC ) END IF * * W := W * V1**T * CALL DTRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**T * DO 30 J = 1, K DO 20 I = 1, LASTC C( J, I ) = C( J, I ) - WORK( I, J ) 20 CONTINUE 30 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILADLR( N, K, V, LDV ) ) LASTC = ILADLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C1 * DO 40 J = 1, K CALL DCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 40 CONTINUE * * W := W * V1 * CALL DTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2 * CALL DGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) 
END IF * * W := W * T or W * T**T * CALL DTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( K+1, 1 ), LDV, ONE, $ C( 1, K+1 ), LDC ) END IF * * W := W * V1**T * CALL DTRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 60 J = 1, K DO 50 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 50 CONTINUE 60 CONTINUE END IF * ELSE * * Let V = ( V1 ) * ( V2 ) (last K rows) * where V2 is unit upper triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILADLR( M, K, V, LDV ) ) LASTC = ILADLC( LASTV, N, C, LDC ) * * W := C**T * V = (C1**T * V1 + C2**T * V2) (stored in WORK) * * W := C2**T * DO 70 J = 1, K CALL DCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) 70 CONTINUE * * W := W * V2 * CALL DTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**T*V1 * CALL DGEMM( 'Transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL DTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1 * W**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTV-K, LASTC, K, -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2**T * CALL DTRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**T * DO 90 J = 1, K DO 80 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - WORK(I, J) 80 CONTINUE 90 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILADLR( N, K, V, LDV ) ) LASTC = ILADLR( M, LASTV, C, LDC 
) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C2 * DO 100 J = 1, K CALL DCOPY( LASTC, C( 1, N-K+J ), 1, WORK( 1, J ), 1 ) 100 CONTINUE * * W := W * V2 * CALL DTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1 * CALL DGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL DTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2**T * CALL DTRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W * DO 120 J = 1, K DO 110 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) - WORK(I, J) 110 CONTINUE 120 CONTINUE END IF END IF * ELSE IF( LSAME( STOREV, 'R' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 V2 ) (V1: first K columns) * where V1 is unit upper triangular. 
* IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILADLC( K, M, V, LDV ) ) LASTC = ILADLC( LASTV, N, C, LDC ) * * W := C**T * V**T = (C1**T * V1**T + C2**T * V2**T) (stored in WORK) * * W := C1**T * DO 130 J = 1, K CALL DCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) 130 CONTINUE * * W := W * V1**T * CALL DTRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**T*V2**T * CALL DGEMM( 'Transpose', 'Transpose', $ LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL DTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**T * W**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2**T * W**T * CALL DGEMM( 'Transpose', 'Transpose', $ LASTV-K, LASTC, K, $ -ONE, V( 1, K+1 ), LDV, WORK, LDWORK, $ ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1 * CALL DTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**T * DO 150 J = 1, K DO 140 I = 1, LASTC C( J, I ) = C( J, I ) - WORK( I, J ) 140 CONTINUE 150 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILADLC( K, N, V, LDV ) ) LASTC = ILADLR( M, LASTV, C, LDC ) * * W := C * V**T = (C1*V1**T + C2*V2**T) (stored in WORK) * * W := C1 * DO 160 J = 1, K CALL DCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 160 CONTINUE * * W := W * V1**T * CALL DTRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTC, K, LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL DTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2 * CALL DGEMM( 'No 
transpose', 'No transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( 1, K+1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1 * CALL DTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 180 J = 1, K DO 170 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 170 CONTINUE 180 CONTINUE * END IF * ELSE * * Let V = ( V1 V2 ) (V2: last K columns) * where V2 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILADLC( K, M, V, LDV ) ) LASTC = ILADLC( LASTV, N, C, LDC ) * * W := C**T * V**T = (C1**T * V1**T + C2**T * V2**T) (stored in WORK) * * W := C2**T * DO 190 J = 1, K CALL DCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) 190 CONTINUE * * W := W * V2**T * CALL DTRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**T * V1**T * CALL DGEMM( 'Transpose', 'Transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL DTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**T * W**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1**T * W**T * CALL DGEMM( 'Transpose', 'Transpose', $ LASTV-K, LASTC, K, -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL DTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**T * DO 210 J = 1, K DO 200 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - WORK(I, J) 200 CONTINUE 210 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILADLC( K, N, V, LDV ) ) LASTC = ILADLR( M, LASTV, C, LDC ) * * W := C * V**T = (C1*V1**T + C2*V2**T) (stored in WORK) * * W := C2 * DO 220 J = 1, K CALL DCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 220 CONTINUE * * W := W * V2**T 
* CALL DTRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1**T * CALL DGEMM( 'No transpose', 'Transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL DTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1 * CALL DGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL DTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C1 := C1 - W * DO 240 J = 1, K DO 230 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) - WORK(I, J) 230 CONTINUE 240 CONTINUE * END IF * END IF END IF * RETURN * * End of DLARFB * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlarfg.f ================================================ *> \brief \b DLARFG * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download DLARFG + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE DLARFG( N, ALPHA, X, INCX, TAU ) * * .. Scalar Arguments .. * INTEGER INCX, N * DOUBLE PRECISION ALPHA, TAU * .. * .. Array Arguments .. * DOUBLE PRECISION X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> DLARFG generates a real elementary reflector H of order n, such *> that *> *> H * ( alpha ) = ( beta ), H**T * H = I. *> ( x ) ( 0 ) *> *> where alpha and beta are scalars, and x is an (n-1)-element real *> vector. H is represented in the form *> *> H = I - tau * ( 1 ) * ( 1 v**T ) , *> ( v ) *> *> where tau is a real scalar and v is a real (n-1)-element *> vector. 
*>
*> If the elements of x are all zero, then tau = 0 and H is taken to be
*> the unit matrix.
*>
*> Otherwise  1 <= tau <= 2.
*> \endverbatim
*
*  Arguments:
*  ==========
*
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The order of the elementary reflector.
*> \endverbatim
*>
*> \param[in,out] ALPHA
*> \verbatim
*>          ALPHA is DOUBLE PRECISION
*>          On entry, the value alpha.
*>          On exit, it is overwritten with the value beta.
*> \endverbatim
*>
*> \param[in,out] X
*> \verbatim
*>          X is DOUBLE PRECISION array, dimension
*>                         (1+(N-2)*abs(INCX))
*>          On entry, the vector x.
*>          On exit, it is overwritten with the vector v.
*> \endverbatim
*>
*> \param[in] INCX
*> \verbatim
*>          INCX is INTEGER
*>          The increment between elements of X. INCX > 0.
*> \endverbatim
*>
*> \param[out] TAU
*> \verbatim
*>          TAU is DOUBLE PRECISION
*>          The value tau.
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup doubleOTHERauxiliary
*
*  =====================================================================
      SUBROUTINE DLARFG( N, ALPHA, X, INCX, TAU )
*
*  -- LAPACK auxiliary routine (version 3.4.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     November 2011
*
*     .. Scalar Arguments ..
      INTEGER            INCX, N
      DOUBLE PRECISION   ALPHA, TAU
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   X( * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D+0, ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
*     KNT counts how many times X was rescaled by 1/SAFMIN below; the
*     scaling is undone on BETA before returning.
      INTEGER            J, KNT
      DOUBLE PRECISION   BETA, RSAFMN, SAFMIN, XNORM
*     ..
*     .. External Functions ..
      DOUBLE PRECISION   DLAMCH, DLAPY2, DNRM2
      EXTERNAL           DLAMCH, DLAPY2, DNRM2
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS, SIGN
*     ..
*     .. External Subroutines ..
      EXTERNAL           DSCAL
*     ..
*     .. Executable Statements ..
*
*     A reflector of order <= 1 is the identity: tau = 0, nothing else
*     to do.
      IF( N.LE.1 ) THEN
         TAU = ZERO
         RETURN
      END IF
*
      XNORM = DNRM2( N-1, X, INCX )
*
      IF( XNORM.EQ.ZERO ) THEN
*
*        H  =  I
*
         TAU = ZERO
      ELSE
*
*        general case
*
*        BETA = -sign(alpha) * || (alpha, x) ||_2, computed without
*        overflow via DLAPY2.
         BETA = -SIGN( DLAPY2( ALPHA, XNORM ), ALPHA )
*        NOTE(review): SAFMIN = DLAMCH('S')/DLAMCH('E') — presumably
*        the smallest value safe to invert; confirm against DLAMCH docs.
         SAFMIN = DLAMCH( 'S' ) / DLAMCH( 'E' )
         KNT = 0
         IF( ABS( BETA ).LT.SAFMIN ) THEN
*
*           XNORM, BETA may be inaccurate; scale X and recompute them
*
            RSAFMN = ONE / SAFMIN
   10       CONTINUE
            KNT = KNT + 1
            CALL DSCAL( N-1, RSAFMN, X, INCX )
            BETA = BETA*RSAFMN
            ALPHA = ALPHA*RSAFMN
            IF( ABS( BETA ).LT.SAFMIN )
     $         GO TO 10
*
*           New BETA is at most 1, at least SAFMIN
*
            XNORM = DNRM2( N-1, X, INCX )
            BETA = -SIGN( DLAPY2( ALPHA, XNORM ), ALPHA )
         END IF
         TAU = ( BETA-ALPHA ) / BETA
*        Overwrite X with the Householder vector v = x / (alpha - beta).
         CALL DSCAL( N-1, ONE / ( ALPHA-BETA ), X, INCX )
*
*        If ALPHA is subnormal, it may lose relative accuracy
*
*        Undo the KNT rescalings so BETA is on the original scale.
         DO 20 J = 1, KNT
            BETA = BETA*SAFMIN
   20    CONTINUE
         ALPHA = BETA
      END IF
*
      RETURN
*
*     End of DLARFG
*
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dlarft.f
================================================
*> \brief \b DLARFT
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download DLARFT + dependencies
*>
*> [TGZ]
*>
*> [ZIP]
*>
*> [TXT]
*> \endhtmlonly
*
*  Definition:
*  ===========
*
*       SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT )
*
*       .. Scalar Arguments ..
*       CHARACTER          DIRECT, STOREV
*       INTEGER            K, LDT, LDV, N
*       ..
*       .. Array Arguments ..
*       DOUBLE PRECISION   T( LDT, * ), TAU( * ), V( LDV, * )
*       ..
*
*
*> \par Purpose:
*  =============
*>
*> \verbatim
*>
*> DLARFT forms the triangular factor T of a real block reflector H
*> of order n, which is defined as a product of k elementary reflectors.
*>
*> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
*>
*> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
*>
*> If STOREV = 'C', the vector which defines the elementary reflector
*> H(i) is stored in the i-th column of the array V, and
*>
*>    H = I - V * T * V**T
*>
*> If STOREV = 'R', the vector which defines the elementary reflector
*> H(i) is stored in the i-th row of the array V, and
*>
*>    H = I - V**T * T * V
*> \endverbatim
*
*  Arguments:
*  ==========
*
*> \param[in] DIRECT
*> \verbatim
*>          DIRECT is CHARACTER*1
*>          Specifies the order in which the elementary reflectors are
*>          multiplied to form the block reflector:
*>          = 'F': H = H(1) H(2) . . . H(k) (Forward)
*>          = 'B': H = H(k) . . . H(2) H(1) (Backward)
*> \endverbatim
*>
*> \param[in] STOREV
*> \verbatim
*>          STOREV is CHARACTER*1
*>          Specifies how the vectors which define the elementary
*>          reflectors are stored (see also Further Details):
*>          = 'C': columnwise
*>          = 'R': rowwise
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The order of the block reflector H. N >= 0.
*> \endverbatim
*>
*> \param[in] K
*> \verbatim
*>          K is INTEGER
*>          The order of the triangular factor T (= the number of
*>          elementary reflectors). K >= 1.
*> \endverbatim
*>
*> \param[in] V
*> \verbatim
*>          V is DOUBLE PRECISION array, dimension
*>                               (LDV,K) if STOREV = 'C'
*>                               (LDV,N) if STOREV = 'R'
*>          The matrix V. See further details.
*> \endverbatim
*>
*> \param[in] LDV
*> \verbatim
*>          LDV is INTEGER
*>          The leading dimension of the array V.
*>          If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
*> \endverbatim
*>
*> \param[in] TAU
*> \verbatim
*>          TAU is DOUBLE PRECISION array, dimension (K)
*>          TAU(i) must contain the scalar factor of the elementary
*>          reflector H(i).
*> \endverbatim
*>
*> \param[out] T
*> \verbatim
*>          T is DOUBLE PRECISION array, dimension (LDT,K)
*>          The k by k triangular factor T of the block reflector.
*>          If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
*>          lower triangular. The rest of the array is not used.
*> \endverbatim
*>
*> \param[in] LDT
*> \verbatim
*>          LDT is INTEGER
*>          The leading dimension of the array T. LDT >= K.
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date April 2012
*
*> \ingroup doubleOTHERauxiliary
*
*> \par Further Details:
*  =====================
*>
*> \verbatim
*>
*>  The shape of the matrix V and the storage of the vectors which define
*>  the H(i) is best illustrated by the following example with n = 5 and
*>  k = 3. The elements equal to 1 are not stored.
*>
*>  DIRECT = 'F' and STOREV = 'C':         DIRECT = 'F' and STOREV = 'R':
*>
*>               V = (  1       )                 V = (  1 v1 v1 v1 v1 )
*>                   ( v1  1    )                     (     1 v2 v2 v2 )
*>                   ( v1 v2  1 )                     (        1 v3 v3 )
*>                   ( v1 v2 v3 )
*>                   ( v1 v2 v3 )
*>
*>  DIRECT = 'B' and STOREV = 'C':         DIRECT = 'B' and STOREV = 'R':
*>
*>               V = ( v1 v2 v3 )                 V = ( v1 v1  1       )
*>                   ( v1 v2 v3 )                     ( v2 v2 v2  1    )
*>                   (  1 v2 v3 )                     ( v3 v3 v3 v3  1 )
*>                   (     1 v3 )
*>                   (        1 )
*> \endverbatim
*>
*  =====================================================================
      SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT )
*
*  -- LAPACK auxiliary routine (version 3.4.1) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     April 2012
*
*     .. Scalar Arguments ..
      CHARACTER          DIRECT, STOREV
      INTEGER            K, LDT, LDV, N
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   T( LDT, * ), TAU( * ), V( LDV, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D+0, ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
*     LASTV/PREVLASTV bound the nonzero span of the reflector vectors
*     so the DGEMV updates below can be trimmed to the nonzero part.
      INTEGER            I, J, PREVLASTV, LASTV
*     ..
*     .. External Subroutines ..
      EXTERNAL           DGEMV, DTRMV
*     ..
*     .. External Functions ..
      LOGICAL            LSAME
      EXTERNAL           LSAME
*     ..
*     .. Executable Statements ..
*
*     Quick return if possible
*
      IF( N.EQ.0 )
     $   RETURN
*
      IF( LSAME( DIRECT, 'F' ) ) THEN
         PREVLASTV = N
         DO I = 1, K
            PREVLASTV = MAX( I, PREVLASTV )
            IF( TAU( I ).EQ.ZERO ) THEN
*
*              H(i)  =  I
*
               DO J = 1, I
                  T( J, I ) = ZERO
               END DO
            ELSE
*
*              general case
*
               IF( LSAME( STOREV, 'C' ) ) THEN
*                 Skip any trailing zeros.
                  DO LASTV = N, I+1, -1
                     IF( V( LASTV, I ).NE.ZERO ) EXIT
                  END DO
                  DO J = 1, I-1
                     T( J, I ) = -TAU( I ) * V( I , J )
                  END DO
                  J = MIN( LASTV, PREVLASTV )
*
*                 T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i)
*
                  CALL DGEMV( 'Transpose', J-I, I-1, -TAU( I ),
     $                        V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE,
     $                        T( 1, I ), 1 )
               ELSE
*                 Skip any trailing zeros.
                  DO LASTV = N, I+1, -1
                     IF( V( I, LASTV ).NE.ZERO ) EXIT
                  END DO
                  DO J = 1, I-1
                     T( J, I ) = -TAU( I ) * V( J , I )
                  END DO
                  J = MIN( LASTV, PREVLASTV )
*
*                 T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T
*
                  CALL DGEMV( 'No transpose', I-1, J-I, -TAU( I ),
     $                        V( 1, I+1 ), LDV, V( I, I+1 ), LDV, ONE,
     $                        T( 1, I ), 1 )
               END IF
*
*              T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i)
*
               CALL DTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T,
     $                     LDT, T( 1, I ), 1 )
               T( I, I ) = TAU( I )
               IF( I.GT.1 ) THEN
                  PREVLASTV = MAX( PREVLASTV, LASTV )
               ELSE
                  PREVLASTV = LASTV
               END IF
            END IF
         END DO
      ELSE
         PREVLASTV = 1
         DO I = K, 1, -1
            IF( TAU( I ).EQ.ZERO ) THEN
*
*              H(i)  =  I
*
               DO J = I, K
                  T( J, I ) = ZERO
               END DO
            ELSE
*
*              general case
*
               IF( I.LT.K ) THEN
                  IF( LSAME( STOREV, 'C' ) ) THEN
*                    Skip any leading zeros.
                     DO LASTV = 1, I-1
                        IF( V( LASTV, I ).NE.ZERO ) EXIT
                     END DO
                     DO J = I+1, K
                        T( J, I ) = -TAU( I ) * V( N-K+I , J )
                     END DO
                     J = MAX( LASTV, PREVLASTV )
*
*                    T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i)
*
                     CALL DGEMV( 'Transpose', N-K+I-J, K-I, -TAU( I ),
     $                           V( J, I+1 ), LDV, V( J, I ), 1, ONE,
     $                           T( I+1, I ), 1 )
                  ELSE
*                    Skip any leading zeros.
                     DO LASTV = 1, I-1
                        IF( V( I, LASTV ).NE.ZERO ) EXIT
                     END DO
                     DO J = I+1, K
                        T( J, I ) = -TAU( I ) * V( J, N-K+I )
                     END DO
                     J = MAX( LASTV, PREVLASTV )
*
*                    T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T
*
                     CALL DGEMV( 'No transpose', K-I, N-K+I-J,
     $                           -TAU( I ), V( I+1, J ), LDV,
     $                           V( I, J ), LDV,
     $                           ONE, T( I+1, I ), 1 )
                  END IF
*
*                 T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i)
*
                  CALL DTRMV( 'Lower', 'No transpose', 'Non-unit', K-I,
     $                        T( I+1, I+1 ), LDT, T( I+1, I ), 1 )
                  IF( I.GT.1 ) THEN
                     PREVLASTV = MIN( PREVLASTV, LASTV )
                  ELSE
                     PREVLASTV = LASTV
                  END IF
               END IF
               T( I, I ) = TAU( I )
            END IF
         END DO
      END IF
      RETURN
*
*     End of DLARFT
*
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/double.cpp
================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Instantiates the double-precision ("d"-prefixed) LAPACK entry points
// by textually including the generic implementations with SCALAR
// defined as double.
#define SCALAR        double
#define SCALAR_SUFFIX d
#define SCALAR_SUFFIX_UP "D"
#define ISCOMPLEX     0

#include "cholesky.cpp"
#include "lu.cpp"
#include "eigenvalues.cpp"
#include "svd.cpp"

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/dsecnd_NONE.f
================================================
*> \brief \b DSECND returns nothing
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*  Definition:
*  ===========
*
*      DOUBLE PRECISION FUNCTION DSECND( )
*
*
*> \par Purpose:
*  =============
*>
*> \verbatim
*>
*> DSECND returns nothing instead of returning the user time for a process in seconds.
*> If you are using that routine, it means that neither EXTERNAL ETIME,
*> EXTERNAL ETIME_, INTERNAL ETIME, INTERNAL CPU_TIME is available on
*> your machine.
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup auxOTHERauxiliary
*
*  =====================================================================
      DOUBLE PRECISION FUNCTION DSECND( )
*
*  -- LAPACK auxiliary routine (version 3.4.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     November 2011
*
*  =====================================================================
*
*     Stub timer: always reports zero elapsed time.
      DSECND = 0.0D+0
      RETURN
*
*     End of DSECND
*
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/eigenvalues.cpp
================================================
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "lapack_common.h" #include // computes eigen values and vectors of a general N-by-N matrix A EIGEN_LAPACK_FUNC(syev,(char *jobz, char *uplo, int* n, Scalar* a, int *lda, Scalar* w, Scalar* /*work*/, int* lwork, int *info)) { // TODO exploit the work buffer bool query_size = *lwork==-1; *info = 0; if(*jobz!='N' && *jobz!='V') *info = -1; else if(UPLO(*uplo)==INVALID) *info = -2; else if(*n<0) *info = -3; else if(*lda eig(mat,computeVectors?ComputeEigenvectors:EigenvaluesOnly); if(eig.info()==NoConvergence) { make_vector(w,*n).setZero(); if(computeVectors) matrix(a,*n,*n,*lda).setIdentity(); //*info = 1; return 0; } make_vector(w,*n) = eig.eigenvalues(); if(computeVectors) matrix(a,*n,*n,*lda) = eig.eigenvectors(); return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilaclc.f ================================================ *> \brief \b ILACLC * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ILACLC + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * INTEGER FUNCTION ILACLC( M, N, A, LDA ) * * .. Scalar Arguments .. * INTEGER M, N, LDA * .. * .. Array Arguments .. * COMPLEX A( LDA, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ILACLC scans A for its last non-zero column. *> \endverbatim * * Arguments: * ========== * *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix A. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix A. *> \endverbatim *> *> \param[in] A *> \verbatim *> A is COMPLEX array, dimension (LDA,N) *> The m by n matrix A. *> \endverbatim *> *> \param[in] LDA *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. LDA >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. 
of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complexOTHERauxiliary * * ===================================================================== INTEGER FUNCTION ILACLC( M, N, A, LDA ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER M, N, LDA * .. * .. Array Arguments .. COMPLEX A( LDA, * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX ZERO PARAMETER ( ZERO = (0.0E+0, 0.0E+0) ) * .. * .. Local Scalars .. INTEGER I * .. * .. Executable Statements .. * * Quick test for the common case where one corner is non-zero. IF( N.EQ.0 ) THEN ILACLC = N ELSE IF( A(1, N).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN ILACLC = N ELSE * Now scan each column from the end, returning with the first non-zero. DO ILACLC = N, 1, -1 DO I = 1, M IF( A(I, ILACLC).NE.ZERO ) RETURN END DO END DO END IF RETURN END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilaclr.f ================================================ *> \brief \b ILACLR * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ILACLR + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * INTEGER FUNCTION ILACLR( M, N, A, LDA ) * * .. Scalar Arguments .. * INTEGER M, N, LDA * .. * .. Array Arguments .. * COMPLEX A( LDA, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ILACLR scans A for its last non-zero row. *> \endverbatim * * Arguments: * ========== * *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix A. 
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The number of columns of the matrix A.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*>          A is COMPLEX array, dimension (LDA,N)
*>          The m by n matrix A.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*>          LDA is INTEGER
*>          The leading dimension of the array A. LDA >= max(1,M).
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date April 2012
*
*> \ingroup complexOTHERauxiliary
*
*  =====================================================================
      INTEGER FUNCTION ILACLR( M, N, A, LDA )
*
*  -- LAPACK auxiliary routine (version 3.4.1) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     April 2012
*
*     .. Scalar Arguments ..
      INTEGER            M, N, LDA
*     ..
*     .. Array Arguments ..
      COMPLEX            A( LDA, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      COMPLEX            ZERO
      PARAMETER          ( ZERO = (0.0E+0, 0.0E+0) )
*     ..
*     .. Local Scalars ..
      INTEGER I, J
*     ..
*     .. Executable Statements ..
*
*     Quick test for the common case where one corner is non-zero.
      IF( M.EQ.0 ) THEN
         ILACLR = M
      ELSE IF( A(M, 1).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN
         ILACLR = M
      ELSE
*     Scan up each column tracking the last zero row seen.
         ILACLR = 0
         DO J = 1, N
            I=M
*           MAX(I,1) keeps the subscript legal when I reaches 0:
*           Fortran .AND. does not guarantee short-circuit evaluation,
*           so both operands may be evaluated even when I.GE.1 is false.
            DO WHILE((A(MAX(I,1),J).EQ.ZERO).AND.(I.GE.1))
               I=I-1
            ENDDO
            ILACLR = MAX( ILACLR, I )
         END DO
      END IF
      RETURN
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/iladlc.f
================================================
*> \brief \b ILADLC
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download ILADLC + dependencies
*>
*> [TGZ]
*>
*> [ZIP]
*>
*> [TXT]
*> \endhtmlonly
*
*  Definition:
*  ===========
*
*       INTEGER FUNCTION ILADLC( M, N, A, LDA )
*
*       .. Scalar Arguments ..
*       INTEGER            M, N, LDA
*       ..
*       .. Array Arguments ..
*       DOUBLE PRECISION   A( LDA, * )
*       ..
*
*
*> \par Purpose:
*  =============
*>
*> \verbatim
*>
*> ILADLC scans A for its last non-zero column.
*> \endverbatim
*
*  Arguments:
*  ==========
*
*> \param[in] M
*> \verbatim
*>          M is INTEGER
*>          The number of rows of the matrix A.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The number of columns of the matrix A.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*>          A is DOUBLE PRECISION array, dimension (LDA,N)
*>          The m by n matrix A.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*>          LDA is INTEGER
*>          The leading dimension of the array A. LDA >= max(1,M).
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup auxOTHERauxiliary
*
*  =====================================================================
      INTEGER FUNCTION ILADLC( M, N, A, LDA )
*
*  -- LAPACK auxiliary routine (version 3.4.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     November 2011
*
*     .. Scalar Arguments ..
      INTEGER            M, N, LDA
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   A( LDA, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      INTEGER I
*     ..
*     .. Executable Statements ..
*
*     Quick test for the common case where one corner is non-zero.
      IF( N.EQ.0 ) THEN
         ILADLC = N
      ELSE IF( A(1, N).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN
         ILADLC = N
      ELSE
*     Now scan each column from the end, returning with the first non-zero.
         DO ILADLC = N, 1, -1
            DO I = 1, M
               IF( A(I, ILADLC).NE.ZERO ) RETURN
            END DO
         END DO
      END IF
      RETURN
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/iladlr.f
================================================
*> \brief \b ILADLR
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download ILADLR + dependencies
*>
*> [TGZ]
*>
*> [ZIP]
*>
*> [TXT]
*> \endhtmlonly
*
*  Definition:
*  ===========
*
*       INTEGER FUNCTION ILADLR( M, N, A, LDA )
*
*       .. Scalar Arguments ..
*       INTEGER            M, N, LDA
*       ..
*       .. Array Arguments ..
*       DOUBLE PRECISION   A( LDA, * )
*       ..
*
*
*> \par Purpose:
*  =============
*>
*> \verbatim
*>
*> ILADLR scans A for its last non-zero row.
*> \endverbatim
*
*  Arguments:
*  ==========
*
*> \param[in] M
*> \verbatim
*>          M is INTEGER
*>          The number of rows of the matrix A.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The number of columns of the matrix A.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*>          A is DOUBLE PRECISION array, dimension (LDA,N)
*>          The m by n matrix A.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*>          LDA is INTEGER
*>          The leading dimension of the array A. LDA >= max(1,M).
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date April 2012
*
*> \ingroup auxOTHERauxiliary
*
*  =====================================================================
      INTEGER FUNCTION ILADLR( M, N, A, LDA )
*
*  -- LAPACK auxiliary routine (version 3.4.1) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     April 2012
*
*     .. Scalar Arguments ..
      INTEGER            M, N, LDA
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   A( LDA, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      INTEGER I, J
*     ..
*     .. Executable Statements ..
*
*     Quick test for the common case where one corner is non-zero.
      IF( M.EQ.0 ) THEN
         ILADLR = M
      ELSE IF( A(M, 1).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN
         ILADLR = M
      ELSE
*     Scan up each column tracking the last zero row seen.
         ILADLR = 0
         DO J = 1, N
            I=M
*           MAX(I,1) keeps the subscript legal when I reaches 0:
*           Fortran .AND. does not guarantee short-circuit evaluation,
*           so both operands may be evaluated even when I.GE.1 is false.
            DO WHILE((A(MAX(I,1),J).EQ.ZERO).AND.(I.GE.1))
               I=I-1
            ENDDO
            ILADLR = MAX( ILADLR, I )
         END DO
      END IF
      RETURN
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilaslc.f
================================================
*> \brief \b ILASLC
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download ILASLC + dependencies
*>
*> [TGZ]
*>
*> [ZIP]
*>
*> [TXT]
*> \endhtmlonly
*
*  Definition:
*  ===========
*
*       INTEGER FUNCTION ILASLC( M, N, A, LDA )
*
*       .. Scalar Arguments ..
*       INTEGER            M, N, LDA
*       ..
*       .. Array Arguments ..
*       REAL               A( LDA, * )
*       ..
*
*
*> \par Purpose:
*  =============
*>
*> \verbatim
*>
*> ILASLC scans A for its last non-zero column.
*> \endverbatim
*
*  Arguments:
*  ==========
*
*> \param[in] M
*> \verbatim
*>          M is INTEGER
*>          The number of rows of the matrix A.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*>          N is INTEGER
*>          The number of columns of the matrix A.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*>          A is REAL array, dimension (LDA,N)
*>          The m by n matrix A.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*>          LDA is INTEGER
*>          The leading dimension of the array A. LDA >= max(1,M).
*> \endverbatim
*
*  Authors:
*  ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date November 2011
*
*> \ingroup realOTHERauxiliary
*
*  =====================================================================
      INTEGER FUNCTION ILASLC( M, N, A, LDA )
*
*  -- LAPACK auxiliary routine (version 3.4.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     November 2011
*
*     .. Scalar Arguments ..
      INTEGER            M, N, LDA
*     ..
*     .. Array Arguments ..
      REAL               A( LDA, * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      REAL               ZERO
*     ZERO is REAL, so use the single-precision literal 0.0E+0 (was the
*     DOUBLE PRECISION literal 0.0D+0), consistent with ILASLR and the
*     upstream LAPACK source.
      PARAMETER          ( ZERO = 0.0E+0 )
*     ..
*     .. Local Scalars ..
      INTEGER I
*     ..
*     .. Executable Statements ..
*
*     Quick test for the common case where one corner is non-zero.
      IF( N.EQ.0 ) THEN
         ILASLC = N
      ELSE IF( A(1, N).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN
         ILASLC = N
      ELSE
*     Now scan each column from the end, returning with the first non-zero.
         DO ILASLC = N, 1, -1
            DO I = 1, M
               IF( A(I, ILASLC).NE.ZERO ) RETURN
            END DO
         END DO
      END IF
      RETURN
      END

================================================
FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilaslr.f
================================================
*> \brief \b ILASLR
*
*  =========== DOCUMENTATION ===========
*
* Online html documentation available at
*            http://www.netlib.org/lapack/explore-html/
*
*> \htmlonly
*> Download ILASLR + dependencies
*>
*> [TGZ]
*>
*> [ZIP]
*>
*> [TXT]
*> \endhtmlonly
*
*  Definition:
*  ===========
*
*       INTEGER FUNCTION ILASLR( M, N, A, LDA )
*
*       .. Scalar Arguments ..
*       INTEGER            M, N, LDA
*       ..
*       .. Array Arguments ..
* REAL A( LDA, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ILASLR scans A for its last non-zero row. *> \endverbatim * * Arguments: * ========== * *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix A. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix A. *> \endverbatim *> *> \param[in] A *> \verbatim *> A is REAL array, dimension (LDA,N) *> The m by n matrix A. *> \endverbatim *> *> \param[in] LDA *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. LDA >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date April 2012 * *> \ingroup realOTHERauxiliary * * ===================================================================== INTEGER FUNCTION ILASLR( M, N, A, LDA ) * * -- LAPACK auxiliary routine (version 3.4.1) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * .. Scalar Arguments .. INTEGER M, N, LDA * .. * .. Array Arguments .. REAL A( LDA, * ) * .. * * ===================================================================== * * .. Parameters .. REAL ZERO PARAMETER ( ZERO = 0.0E+0 ) * .. * .. Local Scalars .. INTEGER I, J * .. * .. Executable Statements .. * * Quick test for the common case where one corner is non-zero. IF( M.EQ.0 ) THEN ILASLR = M ELSEIF( A(M, 1).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN ILASLR = M ELSE * Scan up each column tracking the last zero row seen. 
ILASLR = 0 DO J = 1, N I=M DO WHILE((A(MAX(I,1),J).EQ.ZERO).AND.(I.GE.1)) I=I-1 ENDDO ILASLR = MAX( ILASLR, I ) END DO END IF RETURN END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilazlc.f ================================================ *> \brief \b ILAZLC * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ILAZLC + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * INTEGER FUNCTION ILAZLC( M, N, A, LDA ) * * .. Scalar Arguments .. * INTEGER M, N, LDA * .. * .. Array Arguments .. * COMPLEX*16 A( LDA, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ILAZLC scans A for its last non-zero column. *> \endverbatim * * Arguments: * ========== * *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix A. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix A. *> \endverbatim *> *> \param[in] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) *> The m by n matrix A. *> \endverbatim *> *> \param[in] LDA *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. LDA >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== INTEGER FUNCTION ILAZLC( M, N, A, LDA ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER M, N, LDA * .. * .. Array Arguments .. COMPLEX*16 A( LDA, * ) * .. 
* * ===================================================================== * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = (0.0D+0, 0.0D+0) ) * .. * .. Local Scalars .. INTEGER I * .. * .. Executable Statements .. * * Quick test for the common case where one corner is non-zero. IF( N.EQ.0 ) THEN ILAZLC = N ELSE IF( A(1, N).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN ILAZLC = N ELSE * Now scan each column from the end, returning with the first non-zero. DO ILAZLC = N, 1, -1 DO I = 1, M IF( A(I, ILAZLC).NE.ZERO ) RETURN END DO END DO END IF RETURN END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/ilazlr.f ================================================ *> \brief \b ILAZLR * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ILAZLR + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * INTEGER FUNCTION ILAZLR( M, N, A, LDA ) * * .. Scalar Arguments .. * INTEGER M, N, LDA * .. * .. Array Arguments .. * COMPLEX*16 A( LDA, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ILAZLR scans A for its last non-zero row. *> \endverbatim * * Arguments: * ========== * *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix A. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix A. *> \endverbatim *> *> \param[in] A *> \verbatim *> A is COMPLEX*16 array, dimension (LDA,N) *> The m by n matrix A. *> \endverbatim *> *> \param[in] LDA *> \verbatim *> LDA is INTEGER *> The leading dimension of the array A. LDA >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. 
* *> \date April 2012 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== INTEGER FUNCTION ILAZLR( M, N, A, LDA ) * * -- LAPACK auxiliary routine (version 3.4.1) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * .. Scalar Arguments .. INTEGER M, N, LDA * .. * .. Array Arguments .. COMPLEX*16 A( LDA, * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = (0.0D+0, 0.0D+0) ) * .. * .. Local Scalars .. INTEGER I, J * .. * .. Executable Statements .. * * Quick test for the common case where one corner is non-zero. IF( M.EQ.0 ) THEN ILAZLR = M ELSE IF( A(M, 1).NE.ZERO .OR. A(M, N).NE.ZERO ) THEN ILAZLR = M ELSE * Scan up each column tracking the last zero row seen. ILAZLR = 0 DO J = 1, N I=M DO WHILE((A(MAX(I,1),J).EQ.ZERO).AND.(I.GE.1)) I=I-1 ENDDO ILAZLR = MAX( ILAZLR, I ) END DO END IF RETURN END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/lapack_common.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_LAPACK_COMMON_H #define EIGEN_LAPACK_COMMON_H #include "../blas/common.h" #include "../Eigen/src/misc/lapack.h" #define EIGEN_LAPACK_FUNC(FUNC,ARGLIST) \ extern "C" { int EIGEN_BLAS_FUNC(FUNC) ARGLIST; } \ int EIGEN_BLAS_FUNC(FUNC) ARGLIST typedef Eigen::Map > PivotsType; #if ISCOMPLEX #define EIGEN_LAPACK_ARG_IF_COMPLEX(X) X, #else #define EIGEN_LAPACK_ARG_IF_COMPLEX(X) #endif #endif // EIGEN_LAPACK_COMMON_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/lu.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "common.h" #include // computes an LU factorization of a general M-by-N matrix A using partial pivoting with row interchanges EIGEN_LAPACK_FUNC(getrf,(int *m, int *n, RealScalar *pa, int *lda, int *ipiv, int *info)) { *info = 0; if(*m<0) *info = -1; else if(*n<0) *info = -2; else if(*lda(pa); int nb_transpositions; int ret = int(Eigen::internal::partial_lu_impl ::blocked_lu(*m, *n, a, *lda, ipiv, nb_transpositions)); for(int i=0; i=0) *info = ret+1; return 0; } //GETRS solves a system of linear equations // A * X = B or A' * X = B // with a general N-by-N matrix A using the LU factorization computed by GETRF EIGEN_LAPACK_FUNC(getrs,(char *trans, int *n, int *nrhs, RealScalar *pa, int *lda, int *ipiv, RealScalar *pb, int *ldb, int *info)) { *info = 0; if(OP(*trans)==INVALID) *info = -1; else if(*n<0) *info = -2; else if(*nrhs<0) *info = -3; else if(*lda(pa); Scalar* b = reinterpret_cast(pb); MatrixType lu(a,*n,*n,*lda); MatrixType B(b,*n,*nrhs,*ldb); for(int i=0; i<*n; ++i) ipiv[i]--; if(OP(*trans)==NOTR) { B = PivotsType(ipiv,*n) * B; 
lu.triangularView().solveInPlace(B); lu.triangularView().solveInPlace(B); } else if(OP(*trans)==TR) { lu.triangularView().transpose().solveInPlace(B); lu.triangularView().transpose().solveInPlace(B); B = PivotsType(ipiv,*n).transpose() * B; } else if(OP(*trans)==ADJ) { lu.triangularView().adjoint().solveInPlace(B); lu.triangularView().adjoint().solveInPlace(B); B = PivotsType(ipiv,*n).transpose() * B; } for(int i=0; i<*n; ++i) ipiv[i]++; return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/second_NONE.f ================================================ *> \brief \b SECOND returns nothing * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * * Definition: * =========== * * REAL FUNCTION SECOND( ) * * *> \par Purpose: * ============= *> *> \verbatim *> *> SECOND returns nothing instead of returning the user time for a process in seconds. *> If you are using that routine, it means that neither EXTERNAL ETIME, *> EXTERNAL ETIME_, INTERNAL ETIME, INTERNAL CPU_TIME is available on *> your machine. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== REAL FUNCTION SECOND( ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. 
of Colorado Denver and NAG Ltd..-- * November 2011 * * ===================================================================== * SECOND = 0.0E+0 RETURN * * End of SECOND * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/single.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define SCALAR float #define SCALAR_SUFFIX s #define SCALAR_SUFFIX_UP "S" #define ISCOMPLEX 0 #include "cholesky.cpp" #include "lu.cpp" #include "eigenvalues.cpp" #include "svd.cpp" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/sladiv.f ================================================ *> \brief \b SLADIV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLADIV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE SLADIV( A, B, C, D, P, Q ) * * .. Scalar Arguments .. * REAL A, B, C, D, P, Q * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> SLADIV performs complex division in real arithmetic *> *> a + i*b *> p + i*q = --------- *> c + i*d *> *> The algorithm is due to Robert L. Smith and can be found *> in D. 
Knuth, The art of Computer Programming, Vol.2, p.195 *> \endverbatim * * Arguments: * ========== * *> \param[in] A *> \verbatim *> A is REAL *> \endverbatim *> *> \param[in] B *> \verbatim *> B is REAL *> \endverbatim *> *> \param[in] C *> \verbatim *> C is REAL *> \endverbatim *> *> \param[in] D *> \verbatim *> D is REAL *> The scalars a, b, c, and d in the above expression. *> \endverbatim *> *> \param[out] P *> \verbatim *> P is REAL *> \endverbatim *> *> \param[out] Q *> \verbatim *> Q is REAL *> The scalars p and q in the above expression. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== SUBROUTINE SLADIV( A, B, C, D, P, Q ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. REAL A, B, C, D, P, Q * .. * * ===================================================================== * * .. Local Scalars .. REAL E, F * .. * .. Intrinsic Functions .. INTRINSIC ABS * .. * .. Executable Statements .. * IF( ABS( D ).LT.ABS( C ) ) THEN E = D / C F = C + D*E P = ( A+B*E ) / F Q = ( B-A*E ) / F ELSE E = C / D F = D + C*E P = ( B+A*E ) / F Q = ( -A+B*E ) / F END IF * RETURN * * End of SLADIV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slamch.f ================================================ *> \brief \b SLAMCH * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * * Definition: * =========== * * REAL FUNCTION SLAMCH( CMACH ) * * .. Scalar Arguments .. * CHARACTER CMACH * .. 
* * *> \par Purpose: * ============= *> *> \verbatim *> *> SLAMCH determines single precision machine parameters. *> \endverbatim * * Arguments: * ========== * *> \param[in] CMACH *> \verbatim *> Specifies the value to be returned by SLAMCH: *> = 'E' or 'e', SLAMCH := eps *> = 'S' or 's , SLAMCH := sfmin *> = 'B' or 'b', SLAMCH := base *> = 'P' or 'p', SLAMCH := eps*base *> = 'N' or 'n', SLAMCH := t *> = 'R' or 'r', SLAMCH := rnd *> = 'M' or 'm', SLAMCH := emin *> = 'U' or 'u', SLAMCH := rmin *> = 'L' or 'l', SLAMCH := emax *> = 'O' or 'o', SLAMCH := rmax *> where *> eps = relative machine precision *> sfmin = safe minimum, such that 1/sfmin does not overflow *> base = base of the machine *> prec = eps*base *> t = number of (base) digits in the mantissa *> rnd = 1.0 when rounding occurs in addition, 0.0 otherwise *> emin = minimum exponent before (gradual) underflow *> rmin = underflow threshold - base**(emin-1) *> emax = largest exponent before overflow *> rmax = overflow threshold - (base**emax)*(1-eps) *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== REAL FUNCTION SLAMCH( CMACH ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER CMACH * .. * * ===================================================================== * * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. Local Scalars .. REAL RND, EPS, SFMIN, SMALL, RMACH * .. * .. External Functions .. LOGICAL LSAME EXTERNAL LSAME * .. * .. Intrinsic Functions .. INTRINSIC DIGITS, EPSILON, HUGE, MAXEXPONENT, $ MINEXPONENT, RADIX, TINY * .. 
* .. Executable Statements .. * * * Assume rounding, not chopping. Always. * RND = ONE * IF( ONE.EQ.RND ) THEN EPS = EPSILON(ZERO) * 0.5 ELSE EPS = EPSILON(ZERO) END IF * IF( LSAME( CMACH, 'E' ) ) THEN RMACH = EPS ELSE IF( LSAME( CMACH, 'S' ) ) THEN SFMIN = TINY(ZERO) SMALL = ONE / HUGE(ZERO) IF( SMALL.GE.SFMIN ) THEN * * Use SMALL plus a bit, to avoid the possibility of rounding * causing overflow when computing 1/sfmin. * SFMIN = SMALL*( ONE+EPS ) END IF RMACH = SFMIN ELSE IF( LSAME( CMACH, 'B' ) ) THEN RMACH = RADIX(ZERO) ELSE IF( LSAME( CMACH, 'P' ) ) THEN RMACH = EPS * RADIX(ZERO) ELSE IF( LSAME( CMACH, 'N' ) ) THEN RMACH = DIGITS(ZERO) ELSE IF( LSAME( CMACH, 'R' ) ) THEN RMACH = RND ELSE IF( LSAME( CMACH, 'M' ) ) THEN RMACH = MINEXPONENT(ZERO) ELSE IF( LSAME( CMACH, 'U' ) ) THEN RMACH = tiny(zero) ELSE IF( LSAME( CMACH, 'L' ) ) THEN RMACH = MAXEXPONENT(ZERO) ELSE IF( LSAME( CMACH, 'O' ) ) THEN RMACH = HUGE(ZERO) ELSE RMACH = ZERO END IF * SLAMCH = RMACH RETURN * * End of SLAMCH * END ************************************************************************ *> \brief \b SLAMC3 *> \details *> \b Purpose: *> \verbatim *> SLAMC3 is intended to force A and B to be stored prior to doing *> the addition of A and B , for use in situations where optimizers *> might hold one of these in a register. *> \endverbatim *> \author LAPACK is a software package provided by Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. *> \date November 2011 *> \ingroup auxOTHERauxiliary *> *> \param[in] A *> \verbatim *> \endverbatim *> *> \param[in] B *> \verbatim *> The values A and B. *> \endverbatim *> * REAL FUNCTION SLAMC3( A, B ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. * November 2010 * * .. Scalar Arguments .. REAL A, B * .. * ===================================================================== * * .. Executable Statements .. 
* SLAMC3 = A + B * RETURN * * End of SLAMC3 * END * ************************************************************************ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slapy2.f ================================================ *> \brief \b SLAPY2 * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLAPY2 + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * REAL FUNCTION SLAPY2( X, Y ) * * .. Scalar Arguments .. * REAL X, Y * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> SLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary *> overflow. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is REAL *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is REAL *> X and Y specify the values x and y. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== REAL FUNCTION SLAPY2( X, Y ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. REAL X, Y * .. * * ===================================================================== * * .. Parameters .. REAL ZERO PARAMETER ( ZERO = 0.0E0 ) REAL ONE PARAMETER ( ONE = 1.0E0 ) * .. * .. Local Scalars .. REAL W, XABS, YABS, Z * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, MIN, SQRT * .. * .. Executable Statements .. 
* XABS = ABS( X ) YABS = ABS( Y ) W = MAX( XABS, YABS ) Z = MIN( XABS, YABS ) IF( Z.EQ.ZERO ) THEN SLAPY2 = W ELSE SLAPY2 = W*SQRT( ONE+( Z / W )**2 ) END IF RETURN * * End of SLAPY2 * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slapy3.f ================================================ *> \brief \b SLAPY3 * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLAPY3 + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * REAL FUNCTION SLAPY3( X, Y, Z ) * * .. Scalar Arguments .. * REAL X, Y, Z * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> SLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause *> unnecessary overflow. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is REAL *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is REAL *> \endverbatim *> *> \param[in] Z *> \verbatim *> Z is REAL *> X, Y and Z specify the values x, y and z. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup auxOTHERauxiliary * * ===================================================================== REAL FUNCTION SLAPY3( X, Y, Z ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. REAL X, Y, Z * .. * * ===================================================================== * * .. Parameters .. REAL ZERO PARAMETER ( ZERO = 0.0E0 ) * .. * .. Local Scalars .. REAL W, XABS, YABS, ZABS * .. * .. Intrinsic Functions .. INTRINSIC ABS, MAX, SQRT * .. * .. Executable Statements .. 
* XABS = ABS( X ) YABS = ABS( Y ) ZABS = ABS( Z ) W = MAX( XABS, YABS, ZABS ) IF( W.EQ.ZERO ) THEN * W can be zero for max(0,nan,0) * adding all three entries together will make sure * NaN will not disappear. SLAPY3 = XABS + YABS + ZABS ELSE SLAPY3 = W*SQRT( ( XABS / W )**2+( YABS / W )**2+ $ ( ZABS / W )**2 ) END IF RETURN * * End of SLAPY3 * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slarf.f ================================================ *> \brief \b SLARF * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLARF + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE SLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * .. Scalar Arguments .. * CHARACTER SIDE * INTEGER INCV, LDC, M, N * REAL TAU * .. * .. Array Arguments .. * REAL C( LDC, * ), V( * ), WORK( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> SLARF applies a real elementary reflector H to a real m by n matrix *> C, from either the left or the right. H is represented in the form *> *> H = I - tau * v * v**T *> *> where tau is a real scalar and v is a real vector. *> *> If tau = 0, then H is taken to be the unit matrix. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': form H * C *> = 'R': form C * H *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is REAL array, dimension *> (1 + (M-1)*abs(INCV)) if SIDE = 'L' *> or (1 + (N-1)*abs(INCV)) if SIDE = 'R' *> The vector v in the representation of H. V is not used if *> TAU = 0. 
*> \endverbatim *> *> \param[in] INCV *> \verbatim *> INCV is INTEGER *> The increment between elements of v. INCV <> 0. *> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is REAL *> The value tau in the representation of H. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is REAL array, dimension (LDC,N) *> On entry, the m by n matrix C. *> On exit, C is overwritten by the matrix H * C if SIDE = 'L', *> or C * H if SIDE = 'R'. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension *> (N) if SIDE = 'L' *> or (M) if SIDE = 'R' *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup realOTHERauxiliary * * ===================================================================== SUBROUTINE SLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER SIDE INTEGER INCV, LDC, M, N REAL TAU * .. * .. Array Arguments .. REAL C( LDC, * ), V( * ), WORK( * ) * .. * * ===================================================================== * * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. Local Scalars .. LOGICAL APPLYLEFT INTEGER I, LASTV, LASTC * .. * .. External Subroutines .. EXTERNAL SGEMV, SGER * .. * .. External Functions .. LOGICAL LSAME INTEGER ILASLR, ILASLC EXTERNAL LSAME, ILASLR, ILASLC * .. * .. Executable Statements .. * APPLYLEFT = LSAME( SIDE, 'L' ) LASTV = 0 LASTC = 0 IF( TAU.NE.ZERO ) THEN ! Set up variables for scanning V. LASTV begins pointing to the end ! of V. 
IF( APPLYLEFT ) THEN LASTV = M ELSE LASTV = N END IF IF( INCV.GT.0 ) THEN I = 1 + (LASTV-1) * INCV ELSE I = 1 END IF ! Look for the last non-zero row in V. DO WHILE( LASTV.GT.0 .AND. V( I ).EQ.ZERO ) LASTV = LASTV - 1 I = I - INCV END DO IF( APPLYLEFT ) THEN ! Scan for the last non-zero column in C(1:lastv,:). LASTC = ILASLC(LASTV, N, C, LDC) ELSE ! Scan for the last non-zero row in C(:,1:lastv). LASTC = ILASLR(M, LASTV, C, LDC) END IF END IF ! Note that lastc.eq.0 renders the BLAS operations null; no special ! case is needed at this level. IF( APPLYLEFT ) THEN * * Form H * C * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastv,1:lastc)**T * v(1:lastv,1) * CALL SGEMV( 'Transpose', LASTV, LASTC, ONE, C, LDC, V, INCV, $ ZERO, WORK, 1 ) * * C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)**T * CALL SGER( LASTV, LASTC, -TAU, V, INCV, WORK, 1, C, LDC ) END IF ELSE * * Form C * H * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) * CALL SGEMV( 'No transpose', LASTC, LASTV, ONE, C, LDC, $ V, INCV, ZERO, WORK, 1 ) * * C(1:lastc,1:lastv) := C(...) - w(1:lastc,1) * v(1:lastv,1)**T * CALL SGER( LASTC, LASTV, -TAU, WORK, 1, V, INCV, C, LDC ) END IF END IF RETURN * * End of SLARF * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slarfb.f ================================================ *> \brief \b SLARFB * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLARFB + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE SLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, * T, LDT, C, LDC, WORK, LDWORK ) * * .. Scalar Arguments .. * CHARACTER DIRECT, SIDE, STOREV, TRANS * INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. * REAL C( LDC, * ), T( LDT, * ), V( LDV, * ), * $ WORK( LDWORK, * ) * .. 
* * *> \par Purpose: * ============= *> *> \verbatim *> *> SLARFB applies a real block reflector H or its transpose H**T to a *> real m by n matrix C, from either the left or the right. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': apply H or H**T from the Left *> = 'R': apply H or H**T from the Right *> \endverbatim *> *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 *> = 'N': apply H (No transpose) *> = 'T': apply H**T (Transpose) *> \endverbatim *> *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Indicates how H is formed from a product of elementary *> reflectors *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Indicates how the vectors which define the elementary *> reflectors are stored: *> = 'C': Columnwise *> = 'R': Rowwise *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). *> \endverbatim *> *> \param[in] V *> \verbatim *> V is REAL array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,M) if STOREV = 'R' and SIDE = 'L' *> (LDV,N) if STOREV = 'R' and SIDE = 'R' *> The matrix V. See Further Details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); *> if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); *> if STOREV = 'R', LDV >= K. *> \endverbatim *> *> \param[in] T *> \verbatim *> T is REAL array, dimension (LDT,K) *> The triangular k by k matrix T in the representation of the *> block reflector. 
*> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is REAL array, dimension (LDC,N) *> On entry, the m by n matrix C. *> On exit, C is overwritten by H*C or H**T*C or C*H or C*H**T. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is REAL array, dimension (LDWORK,K) *> \endverbatim *> *> \param[in] LDWORK *> \verbatim *> LDWORK is INTEGER *> The leading dimension of the array WORK. *> If SIDE = 'L', LDWORK >= max(1,N); *> if SIDE = 'R', LDWORK >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup realOTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored; the corresponding *> array elements are modified but restored on exit. The rest of the *> array is not used. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE SLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, $ T, LDT, C, LDC, WORK, LDWORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. 
of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER DIRECT, SIDE, STOREV, TRANS INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. REAL C( LDC, * ), T( LDT, * ), V( LDV, * ), $ WORK( LDWORK, * ) * .. * * ===================================================================== * * .. Parameters .. REAL ONE PARAMETER ( ONE = 1.0E+0 ) * .. * .. Local Scalars .. CHARACTER TRANST INTEGER I, J, LASTV, LASTC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILASLR, ILASLC EXTERNAL LSAME, ILASLR, ILASLC * .. * .. External Subroutines .. EXTERNAL SCOPY, SGEMM, STRMM * .. * .. Executable Statements .. * * Quick return if possible * IF( M.LE.0 .OR. N.LE.0 ) $ RETURN * IF( LSAME( TRANS, 'N' ) ) THEN TRANST = 'T' ELSE TRANST = 'N' END IF * IF( LSAME( STOREV, 'C' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 ) (first K rows) * ( V2 ) * where V1 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILASLR( M, K, V, LDV ) ) LASTC = ILASLC( LASTV, N, C, LDC ) * * W := C**T * V = (C1**T * V1 + C2**T * V2) (stored in WORK) * * W := C1**T * DO 10 J = 1, K CALL SCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) 10 CONTINUE * * W := W * V1 * CALL STRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**T *V2 * CALL SGEMM( 'Transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL STRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2 * W**T * CALL SGEMM( 'No transpose', 'Transpose', $ LASTV-K, LASTC, K, $ -ONE, V( K+1, 1 ), LDV, WORK, LDWORK, ONE, $ C( K+1, 1 ), LDC ) END IF * * W := W * V1**T * CALL STRMM( 'Right', 'Lower', 
'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**T * DO 30 J = 1, K DO 20 I = 1, LASTC C( J, I ) = C( J, I ) - WORK( I, J ) 20 CONTINUE 30 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILASLR( N, K, V, LDV ) ) LASTC = ILASLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C1 * DO 40 J = 1, K CALL SCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 40 CONTINUE * * W := W * V1 * CALL STRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2 * CALL SGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL STRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2**T * CALL SGEMM( 'No transpose', 'Transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( K+1, 1 ), LDV, ONE, $ C( 1, K+1 ), LDC ) END IF * * W := W * V1**T * CALL STRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 60 J = 1, K DO 50 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 50 CONTINUE 60 CONTINUE END IF * ELSE * * Let V = ( V1 ) * ( V2 ) (last K rows) * where V2 is unit upper triangular. 
* IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILASLR( M, K, V, LDV ) ) LASTC = ILASLC( LASTV, N, C, LDC ) * * W := C**T * V = (C1**T * V1 + C2**T * V2) (stored in WORK) * * W := C2**T * DO 70 J = 1, K CALL SCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) 70 CONTINUE * * W := W * V2 * CALL STRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**T*V1 * CALL SGEMM( 'Transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL STRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1 * W**T * CALL SGEMM( 'No transpose', 'Transpose', $ LASTV-K, LASTC, K, -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2**T * CALL STRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**T * DO 90 J = 1, K DO 80 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - WORK(I, J) 80 CONTINUE 90 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILASLR( N, K, V, LDV ) ) LASTC = ILASLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C2 * DO 100 J = 1, K CALL SCOPY( LASTC, C( 1, N-K+J ), 1, WORK( 1, J ), 1 ) 100 CONTINUE * * W := W * V2 * CALL STRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1 * CALL SGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL STRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1**T * CALL SGEMM( 'No 
transpose', 'Transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2**T * CALL STRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W * DO 120 J = 1, K DO 110 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) - WORK(I, J) 110 CONTINUE 120 CONTINUE END IF END IF * ELSE IF( LSAME( STOREV, 'R' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 V2 ) (V1: first K columns) * where V1 is unit upper triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILASLC( K, M, V, LDV ) ) LASTC = ILASLC( LASTV, N, C, LDC ) * * W := C**T * V**T = (C1**T * V1**T + C2**T * V2**T) (stored in WORK) * * W := C1**T * DO 130 J = 1, K CALL SCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) 130 CONTINUE * * W := W * V1**T * CALL STRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**T*V2**T * CALL SGEMM( 'Transpose', 'Transpose', $ LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL STRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**T * W**T * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2**T * W**T * CALL SGEMM( 'Transpose', 'Transpose', $ LASTV-K, LASTC, K, $ -ONE, V( 1, K+1 ), LDV, WORK, LDWORK, $ ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1 * CALL STRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**T * DO 150 J = 1, K DO 140 I = 1, LASTC C( J, I ) = C( J, I ) - WORK( I, J ) 140 CONTINUE 150 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILASLC( K, N, V, LDV ) ) LASTC = ILASLR( M, LASTV, C, LDC ) * * W := C * V**T = (C1*V1**T + C2*V2**T) (stored in WORK) * * W := C1 * DO 160 J = 1, K CALL SCOPY( LASTC, C( 1, J ), 1, 
WORK( 1, J ), 1 ) 160 CONTINUE * * W := W * V1**T * CALL STRMM( 'Right', 'Upper', 'Transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2**T * CALL SGEMM( 'No transpose', 'Transpose', $ LASTC, K, LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL STRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2 * CALL SGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( 1, K+1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1 * CALL STRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 180 J = 1, K DO 170 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 170 CONTINUE 180 CONTINUE * END IF * ELSE * * Let V = ( V1 V2 ) (V2: last K columns) * where V2 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**T * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILASLC( K, M, V, LDV ) ) LASTC = ILASLC( LASTV, N, C, LDC ) * * W := C**T * V**T = (C1**T * V1**T + C2**T * V2**T) (stored in WORK) * * W := C2**T * DO 190 J = 1, K CALL SCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) 190 CONTINUE * * W := W * V2**T * CALL STRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**T * V1**T * CALL SGEMM( 'Transpose', 'Transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**T or W * T * CALL STRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**T * W**T * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1**T * W**T * CALL SGEMM( 'Transpose', 'Transpose', $ LASTV-K, LASTC, K, -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL STRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, 
ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**T * DO 210 J = 1, K DO 200 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - WORK(I, J) 200 CONTINUE 210 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**T where C = ( C1 C2 ) * LASTV = MAX( K, ILASLC( K, N, V, LDV ) ) LASTC = ILASLR( M, LASTV, C, LDC ) * * W := C * V**T = (C1*V1**T + C2*V2**T) (stored in WORK) * * W := C2 * DO 220 J = 1, K CALL SCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 220 CONTINUE * * W := W * V2**T * CALL STRMM( 'Right', 'Lower', 'Transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1**T * CALL SGEMM( 'No transpose', 'Transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**T * CALL STRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1 * CALL SGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL STRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C1 := C1 - W * DO 240 J = 1, K DO 230 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) $ - WORK( I, J ) 230 CONTINUE 240 CONTINUE * END IF * END IF END IF * RETURN * * End of SLARFB * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slarfg.f ================================================ *> \brief \b SLARFG * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLARFG + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE SLARFG( N, ALPHA, X, INCX, TAU ) * * .. Scalar Arguments .. * INTEGER INCX, N * REAL ALPHA, TAU * .. * .. 
Array Arguments .. * REAL X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> SLARFG generates a real elementary reflector H of order n, such *> that *> *> H * ( alpha ) = ( beta ), H**T * H = I. *> ( x ) ( 0 ) *> *> where alpha and beta are scalars, and x is an (n-1)-element real *> vector. H is represented in the form *> *> H = I - tau * ( 1 ) * ( 1 v**T ) , *> ( v ) *> *> where tau is a real scalar and v is a real (n-1)-element *> vector. *> *> If the elements of x are all zero, then tau = 0 and H is taken to be *> the unit matrix. *> *> Otherwise 1 <= tau <= 2. *> \endverbatim * * Arguments: * ========== * *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the elementary reflector. *> \endverbatim *> *> \param[in,out] ALPHA *> \verbatim *> ALPHA is REAL *> On entry, the value alpha. *> On exit, it is overwritten with the value beta. *> \endverbatim *> *> \param[in,out] X *> \verbatim *> X is REAL array, dimension *> (1+(N-2)*abs(INCX)) *> On entry, the vector x. *> On exit, it is overwritten with the vector v. *> \endverbatim *> *> \param[in] INCX *> \verbatim *> INCX is INTEGER *> The increment between elements of X. INCX > 0. *> \endverbatim *> *> \param[out] TAU *> \verbatim *> TAU is REAL *> The value tau. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup realOTHERauxiliary * * ===================================================================== SUBROUTINE SLARFG( N, ALPHA, X, INCX, TAU ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER INCX, N REAL ALPHA, TAU * .. * .. Array Arguments .. REAL X( * ) * .. 
* * ===================================================================== * * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. Local Scalars .. INTEGER J, KNT REAL BETA, RSAFMN, SAFMIN, XNORM * .. * .. External Functions .. REAL SLAMCH, SLAPY2, SNRM2 EXTERNAL SLAMCH, SLAPY2, SNRM2 * .. * .. Intrinsic Functions .. INTRINSIC ABS, SIGN * .. * .. External Subroutines .. EXTERNAL SSCAL * .. * .. Executable Statements .. * IF( N.LE.1 ) THEN TAU = ZERO RETURN END IF * XNORM = SNRM2( N-1, X, INCX ) * IF( XNORM.EQ.ZERO ) THEN * * H = I * TAU = ZERO ELSE * * general case * BETA = -SIGN( SLAPY2( ALPHA, XNORM ), ALPHA ) SAFMIN = SLAMCH( 'S' ) / SLAMCH( 'E' ) KNT = 0 IF( ABS( BETA ).LT.SAFMIN ) THEN * * XNORM, BETA may be inaccurate; scale X and recompute them * RSAFMN = ONE / SAFMIN 10 CONTINUE KNT = KNT + 1 CALL SSCAL( N-1, RSAFMN, X, INCX ) BETA = BETA*RSAFMN ALPHA = ALPHA*RSAFMN IF( ABS( BETA ).LT.SAFMIN ) $ GO TO 10 * * New BETA is at most 1, at least SAFMIN * XNORM = SNRM2( N-1, X, INCX ) BETA = -SIGN( SLAPY2( ALPHA, XNORM ), ALPHA ) END IF TAU = ( BETA-ALPHA ) / BETA CALL SSCAL( N-1, ONE / ( ALPHA-BETA ), X, INCX ) * * If ALPHA is subnormal, it may lose relative accuracy * DO 20 J = 1, KNT BETA = BETA*SAFMIN 20 CONTINUE ALPHA = BETA END IF * RETURN * * End of SLARFG * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/slarft.f ================================================ *> \brief \b SLARFT * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download SLARFT + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV * INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. * REAL T( LDT, * ), TAU( * ), V( LDV, * ) * .. 
* * *> \par Purpose: * ============= *> *> \verbatim *> *> SLARFT forms the triangular factor T of a real block reflector H *> of order n, which is defined as a product of k elementary reflectors. *> *> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; *> *> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. *> *> If STOREV = 'C', the vector which defines the elementary reflector *> H(i) is stored in the i-th column of the array V, and *> *> H = I - V * T * V**T *> *> If STOREV = 'R', the vector which defines the elementary reflector *> H(i) is stored in the i-th row of the array V, and *> *> H = I - V**T * T * V *> \endverbatim * * Arguments: * ========== * *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Specifies the order in which the elementary reflectors are *> multiplied to form the block reflector: *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Specifies how the vectors which define the elementary *> reflectors are stored (see also Further Details): *> = 'C': columnwise *> = 'R': rowwise *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the block reflector H. N >= 0. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the triangular factor T (= the number of *> elementary reflectors). K >= 1. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is REAL array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,N) if STOREV = 'R' *> The matrix V. See further details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. *> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is REAL array, dimension (K) *> TAU(i) must contain the scalar factor of the elementary *> reflector H(i). 
*> \endverbatim *> *> \param[out] T *> \verbatim *> T is REAL array, dimension (LDT,K) *> The k by k triangular factor T of the block reflector. *> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is *> lower triangular. The rest of the array is not used. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date April 2012 * *> \ingroup realOTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * -- LAPACK auxiliary routine (version 3.4.1) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * .. Scalar Arguments .. CHARACTER DIRECT, STOREV INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. REAL T( LDT, * ), TAU( * ), V( LDV, * ) * .. * * ===================================================================== * * .. Parameters .. REAL ONE, ZERO PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) * .. * .. Local Scalars .. INTEGER I, J, PREVLASTV, LASTV * .. * .. 
External Subroutines .. EXTERNAL SGEMV, STRMV * .. * .. External Functions .. LOGICAL LSAME EXTERNAL LSAME * .. * .. Executable Statements .. * * Quick return if possible * IF( N.EQ.0 ) $ RETURN * IF( LSAME( DIRECT, 'F' ) ) THEN PREVLASTV = N DO I = 1, K PREVLASTV = MAX( I, PREVLASTV ) IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = 1, I T( J, I ) = ZERO END DO ELSE * * general case * IF( LSAME( STOREV, 'C' ) ) THEN * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * V( I , J ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i) * CALL SGEMV( 'Transpose', J-I, I-1, -TAU( I ), $ V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE, $ T( 1, I ), 1 ) ELSE * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * V( J , I ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T * CALL SGEMV( 'No transpose', I-1, J-I, -TAU( I ), $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, $ ONE, T( 1, I ), 1 ) END IF * * T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) * CALL STRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, $ LDT, T( 1, I ), 1 ) T( I, I ) = TAU( I ) IF( I.GT.1 ) THEN PREVLASTV = MAX( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF END DO ELSE PREVLASTV = 1 DO I = K, 1, -1 IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = I, K T( J, I ) = ZERO END DO ELSE * * general case * IF( I.LT.K ) THEN IF( LSAME( STOREV, 'C' ) ) THEN * Skip any leading zeros. DO LASTV = 1, I-1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * V( N-K+I , J ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i) * CALL SGEMV( 'Transpose', N-K+I-J, K-I, -TAU( I ), $ V( J, I+1 ), LDV, V( J, I ), 1, ONE, $ T( I+1, I ), 1 ) ELSE * Skip any leading zeros. 
DO LASTV = 1, I-1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * V( J, N-K+I ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T * CALL SGEMV( 'No transpose', K-I, N-K+I-J, $ -TAU( I ), V( I+1, J ), LDV, V( I, J ), LDV, $ ONE, T( I+1, I ), 1 ) END IF * * T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) * CALL STRMV( 'Lower', 'No transpose', 'Non-unit', K-I, $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) IF( I.GT.1 ) THEN PREVLASTV = MIN( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF T( I, I ) = TAU( I ) END IF END DO END IF RETURN * * End of SLARFT * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/svd.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "lapack_common.h" #include // computes the singular values/vectors a general M-by-N matrix A using divide-and-conquer EIGEN_LAPACK_FUNC(gesdd,(char *jobz, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork, EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int * /*iwork*/, int *info)) { // TODO exploit the work buffer bool query_size = *lwork==-1; int diag_size = (std::min)(*m,*n); *info = 0; if(*jobz!='A' && *jobz!='S' && *jobz!='O' && *jobz!='N') *info = -1; else if(*m<0) *info = -2; else if(*n<0) *info = -3; else if(*lda=*n && *ldvt<*n)) *info = -10; if(*info!=0) { int e = -*info; return xerbla_(SCALAR_SUFFIX_UP"GESDD ", &e, 6); } if(query_size) { *lwork = 0; return 0; } if(*n==0 || *m==0) return 0; PlainMatrixType mat(*m,*n); mat = matrix(a,*m,*n,*lda); int option = *jobz=='A' ? ComputeFullU|ComputeFullV : *jobz=='S' ? ComputeThinU|ComputeThinV : *jobz=='O' ? ComputeThinU|ComputeThinV : 0; BDCSVD svd(mat,option); make_vector(s,diag_size) = svd.singularValues().head(diag_size); if(*jobz=='A') { matrix(u,*m,*m,*ldu) = svd.matrixU(); matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='S') { matrix(u,*m,diag_size,*ldu) = svd.matrixU(); matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='O' && *m>=*n) { matrix(a,*m,*n,*lda) = svd.matrixU(); matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='O') { matrix(u,*m,*m,*ldu) = svd.matrixU(); matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint(); } return 0; } // computes the singular values/vectors a general M-by-N matrix A using two sided jacobi algorithm EIGEN_LAPACK_FUNC(gesvd,(char *jobu, char *jobv, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork, EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int *info)) { // TODO exploit the work buffer bool query_size = *lwork==-1; int diag_size = (std::min)(*m,*n); 
*info = 0; if( *jobu!='A' && *jobu!='S' && *jobu!='O' && *jobu!='N') *info = -1; else if((*jobv!='A' && *jobv!='S' && *jobv!='O' && *jobv!='N') || (*jobu=='O' && *jobv=='O')) *info = -2; else if(*m<0) *info = -3; else if(*n<0) *info = -4; else if(*lda svd(mat,option); make_vector(s,diag_size) = svd.singularValues().head(diag_size); { if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU(); else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU(); else if(*jobu=='O') matrix(a,*m,diag_size,*lda) = svd.matrixU(); } { if(*jobv=='A') matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); else if(*jobv=='S') matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint(); else if(*jobv=='O') matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint(); } return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zlacgv.f ================================================ *> \brief \b ZLACGV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLACGV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZLACGV( N, X, INCX ) * * .. Scalar Arguments .. * INTEGER INCX, N * .. * .. Array Arguments .. * COMPLEX*16 X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLACGV conjugates a complex vector of length N. *> \endverbatim * * Arguments: * ========== * *> \param[in] N *> \verbatim *> N is INTEGER *> The length of the vector X. N >= 0. *> \endverbatim *> *> \param[in,out] X *> \verbatim *> X is COMPLEX*16 array, dimension *> (1+(N-1)*abs(INCX)) *> On entry, the vector of length N to be conjugated. *> On exit, X is overwritten with conjg(X). *> \endverbatim *> *> \param[in] INCX *> \verbatim *> INCX is INTEGER *> The spacing between successive elements of X. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. 
of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== SUBROUTINE ZLACGV( N, X, INCX ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER INCX, N * .. * .. Array Arguments .. COMPLEX*16 X( * ) * .. * * ===================================================================== * * .. Local Scalars .. INTEGER I, IOFF * .. * .. Intrinsic Functions .. INTRINSIC DCONJG * .. * .. Executable Statements .. * IF( INCX.EQ.1 ) THEN DO 10 I = 1, N X( I ) = DCONJG( X( I ) ) 10 CONTINUE ELSE IOFF = 1 IF( INCX.LT.0 ) $ IOFF = 1 - ( N-1 )*INCX DO 20 I = 1, N X( IOFF ) = DCONJG( X( IOFF ) ) IOFF = IOFF + INCX 20 CONTINUE END IF RETURN * * End of ZLACGV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zladiv.f ================================================ *> \brief \b ZLADIV * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLADIV + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * COMPLEX*16 FUNCTION ZLADIV( X, Y ) * * .. Scalar Arguments .. * COMPLEX*16 X, Y * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLADIV := X / Y, where X and Y are complex. The computation of X / Y *> will not overflow on an intermediary step unless the results *> overflows. *> \endverbatim * * Arguments: * ========== * *> \param[in] X *> \verbatim *> X is COMPLEX*16 *> \endverbatim *> *> \param[in] Y *> \verbatim *> Y is COMPLEX*16 *> The complex scalars X and Y. *> \endverbatim * * Authors: * ======== * *> \author Univ. 
of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== COMPLEX*16 FUNCTION ZLADIV( X, Y ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. COMPLEX*16 X, Y * .. * * ===================================================================== * * .. Local Scalars .. DOUBLE PRECISION ZI, ZR * .. * .. External Subroutines .. EXTERNAL DLADIV * .. * .. Intrinsic Functions .. INTRINSIC DBLE, DCMPLX, DIMAG * .. * .. Executable Statements .. * CALL DLADIV( DBLE( X ), DIMAG( X ), DBLE( Y ), DIMAG( Y ), ZR, $ ZI ) ZLADIV = DCMPLX( ZR, ZI ) * RETURN * * End of ZLADIV * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zlarf.f ================================================ *> \brief \b ZLARF * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLARF + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * .. Scalar Arguments .. * CHARACTER SIDE * INTEGER INCV, LDC, M, N * COMPLEX*16 TAU * .. * .. Array Arguments .. * COMPLEX*16 C( LDC, * ), V( * ), WORK( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLARF applies a complex elementary reflector H to a complex M-by-N *> matrix C, from either the left or the right. H is represented in the *> form *> *> H = I - tau * v * v**H *> *> where tau is a complex scalar and v is a complex vector. *> *> If tau = 0, then H is taken to be the unit matrix. 
*> *> To apply H**H, supply conjg(tau) instead *> tau. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': form H * C *> = 'R': form C * H *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX*16 array, dimension *> (1 + (M-1)*abs(INCV)) if SIDE = 'L' *> or (1 + (N-1)*abs(INCV)) if SIDE = 'R' *> The vector v in the representation of H. V is not used if *> TAU = 0. *> \endverbatim *> *> \param[in] INCV *> \verbatim *> INCV is INTEGER *> The increment between elements of v. INCV <> 0. *> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is COMPLEX*16 *> The value tau in the representation of H. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is COMPLEX*16 array, dimension (LDC,N) *> On entry, the M-by-N matrix C. *> On exit, C is overwritten by the matrix H * C if SIDE = 'L', *> or C * H if SIDE = 'R'. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension *> (N) if SIDE = 'L' *> or (M) if SIDE = 'R' *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== SUBROUTINE ZLARF( SIDE, M, N, V, INCV, TAU, C, LDC, WORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. 
CHARACTER SIDE INTEGER INCV, LDC, M, N COMPLEX*16 TAU * .. * .. Array Arguments .. COMPLEX*16 C( LDC, * ), V( * ), WORK( * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX*16 ONE, ZERO PARAMETER ( ONE = ( 1.0D+0, 0.0D+0 ), $ ZERO = ( 0.0D+0, 0.0D+0 ) ) * .. * .. Local Scalars .. LOGICAL APPLYLEFT INTEGER I, LASTV, LASTC * .. * .. External Subroutines .. EXTERNAL ZGEMV, ZGERC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILAZLR, ILAZLC EXTERNAL LSAME, ILAZLR, ILAZLC * .. * .. Executable Statements .. * APPLYLEFT = LSAME( SIDE, 'L' ) LASTV = 0 LASTC = 0 IF( TAU.NE.ZERO ) THEN * Set up variables for scanning V. LASTV begins pointing to the end * of V. IF( APPLYLEFT ) THEN LASTV = M ELSE LASTV = N END IF IF( INCV.GT.0 ) THEN I = 1 + (LASTV-1) * INCV ELSE I = 1 END IF * Look for the last non-zero row in V. DO WHILE( LASTV.GT.0 .AND. V( I ).EQ.ZERO ) LASTV = LASTV - 1 I = I - INCV END DO IF( APPLYLEFT ) THEN * Scan for the last non-zero column in C(1:lastv,:). LASTC = ILAZLC(LASTV, N, C, LDC) ELSE * Scan for the last non-zero row in C(:,1:lastv). LASTC = ILAZLR(M, LASTV, C, LDC) END IF END IF * Note that lastc.eq.0 renders the BLAS operations null; no special * case is needed at this level. IF( APPLYLEFT ) THEN * * Form H * C * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastv,1:lastc)**H * v(1:lastv,1) * CALL ZGEMV( 'Conjugate transpose', LASTV, LASTC, ONE, $ C, LDC, V, INCV, ZERO, WORK, 1 ) * * C(1:lastv,1:lastc) := C(...) - v(1:lastv,1) * w(1:lastc,1)**H * CALL ZGERC( LASTV, LASTC, -TAU, V, INCV, WORK, 1, C, LDC ) END IF ELSE * * Form C * H * IF( LASTV.GT.0 ) THEN * * w(1:lastc,1) := C(1:lastc,1:lastv) * v(1:lastv,1) * CALL ZGEMV( 'No transpose', LASTC, LASTV, ONE, C, LDC, $ V, INCV, ZERO, WORK, 1 ) * * C(1:lastc,1:lastv) := C(...) 
- w(1:lastc,1) * v(1:lastv,1)**H * CALL ZGERC( LASTC, LASTV, -TAU, WORK, 1, V, INCV, C, LDC ) END IF END IF RETURN * * End of ZLARF * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zlarfb.f ================================================ *> \brief \b ZLARFB * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLARFB + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, * T, LDT, C, LDC, WORK, LDWORK ) * * .. Scalar Arguments .. * CHARACTER DIRECT, SIDE, STOREV, TRANS * INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. * COMPLEX*16 C( LDC, * ), T( LDT, * ), V( LDV, * ), * $ WORK( LDWORK, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLARFB applies a complex block reflector H or its transpose H**H to a *> complex M-by-N matrix C, from either the left or the right. *> \endverbatim * * Arguments: * ========== * *> \param[in] SIDE *> \verbatim *> SIDE is CHARACTER*1 *> = 'L': apply H or H**H from the Left *> = 'R': apply H or H**H from the Right *> \endverbatim *> *> \param[in] TRANS *> \verbatim *> TRANS is CHARACTER*1 *> = 'N': apply H (No transpose) *> = 'C': apply H**H (Conjugate transpose) *> \endverbatim *> *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Indicates how H is formed from a product of elementary *> reflectors *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Indicates how the vectors which define the elementary *> reflectors are stored: *> = 'C': Columnwise *> = 'R': Rowwise *> \endverbatim *> *> \param[in] M *> \verbatim *> M is INTEGER *> The number of rows of the matrix C. 
*> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The number of columns of the matrix C. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the matrix T (= the number of elementary *> reflectors whose product defines the block reflector). *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX*16 array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,M) if STOREV = 'R' and SIDE = 'L' *> (LDV,N) if STOREV = 'R' and SIDE = 'R' *> See Further Details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. *> If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); *> if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); *> if STOREV = 'R', LDV >= K. *> \endverbatim *> *> \param[in] T *> \verbatim *> T is COMPLEX*16 array, dimension (LDT,K) *> The triangular K-by-K matrix T in the representation of the *> block reflector. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim *> *> \param[in,out] C *> \verbatim *> C is COMPLEX*16 array, dimension (LDC,N) *> On entry, the M-by-N matrix C. *> On exit, C is overwritten by H*C or H**H*C or C*H or C*H**H. *> \endverbatim *> *> \param[in] LDC *> \verbatim *> LDC is INTEGER *> The leading dimension of the array C. LDC >= max(1,M). *> \endverbatim *> *> \param[out] WORK *> \verbatim *> WORK is COMPLEX*16 array, dimension (LDWORK,K) *> \endverbatim *> *> \param[in] LDWORK *> \verbatim *> LDWORK is INTEGER *> The leading dimension of the array WORK. *> If SIDE = 'L', LDWORK >= max(1,N); *> if SIDE = 'R', LDWORK >= max(1,M). *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. 
* *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored; the corresponding *> array elements are modified but restored on exit. The rest of the *> array is not used. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE ZLARFB( SIDE, TRANS, DIRECT, STOREV, M, N, K, V, LDV, $ T, LDT, C, LDC, WORK, LDWORK ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. CHARACTER DIRECT, SIDE, STOREV, TRANS INTEGER K, LDC, LDT, LDV, LDWORK, M, N * .. * .. Array Arguments .. COMPLEX*16 C( LDC, * ), T( LDT, * ), V( LDV, * ), $ WORK( LDWORK, * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX*16 ONE PARAMETER ( ONE = ( 1.0D+0, 0.0D+0 ) ) * .. * .. Local Scalars .. CHARACTER TRANST INTEGER I, J, LASTV, LASTC * .. * .. External Functions .. LOGICAL LSAME INTEGER ILAZLR, ILAZLC EXTERNAL LSAME, ILAZLR, ILAZLC * .. * .. External Subroutines .. EXTERNAL ZCOPY, ZGEMM, ZLACGV, ZTRMM * .. * .. Intrinsic Functions .. INTRINSIC DCONJG * .. * .. Executable Statements .. * * Quick return if possible * IF( M.LE.0 .OR. 
N.LE.0 ) $ RETURN * IF( LSAME( TRANS, 'N' ) ) THEN TRANST = 'C' ELSE TRANST = 'N' END IF * IF( LSAME( STOREV, 'C' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 ) (first K rows) * ( V2 ) * where V1 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILAZLR( M, K, V, LDV ) ) LASTC = ILAZLC( LASTV, N, C, LDC ) * * W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK) * * W := C1**H * DO 10 J = 1, K CALL ZCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) CALL ZLACGV( LASTC, WORK( 1, J ), 1 ) 10 CONTINUE * * W := W * V1 * CALL ZTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**H *V2 * CALL ZGEMM( 'Conjugate transpose', 'No transpose', $ LASTC, K, LASTV-K, ONE, C( K+1, 1 ), LDC, $ V( K+1, 1 ), LDV, ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL ZTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**H * IF( M.GT.K ) THEN * * C2 := C2 - V2 * W**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTV-K, LASTC, K, $ -ONE, V( K+1, 1 ), LDV, WORK, LDWORK, $ ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1**H * CALL ZTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**H * DO 30 J = 1, K DO 20 I = 1, LASTC C( J, I ) = C( J, I ) - DCONJG( WORK( I, J ) ) 20 CONTINUE 30 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILAZLR( N, K, V, LDV ) ) LASTC = ILAZLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C1 * DO 40 J = 1, K CALL ZCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 40 CONTINUE * * W := W * V1 * CALL ZTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2 * CALL ZGEMM( 'No transpose', 'No transpose', $ LASTC, K, 
LASTV-K, $ ONE, C( 1, K+1 ), LDC, V( K+1, 1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL ZTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**H * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( K+1, 1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1**H * CALL ZTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 60 J = 1, K DO 50 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 50 CONTINUE 60 CONTINUE END IF * ELSE * * Let V = ( V1 ) * ( V2 ) (last K rows) * where V2 is unit upper triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILAZLR( M, K, V, LDV ) ) LASTC = ILAZLC( LASTV, N, C, LDC ) * * W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK) * * W := C2**H * DO 70 J = 1, K CALL ZCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) CALL ZLACGV( LASTC, WORK( 1, J ), 1 ) 70 CONTINUE * * W := W * V2 * CALL ZTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**H*V1 * CALL ZGEMM( 'Conjugate transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C, LDC, V, LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL ZTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V * W**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1 * W**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTV-K, LASTC, K, $ -ONE, V, LDV, WORK, LDWORK, $ ONE, C, LDC ) END IF * * W := W * V2**H * CALL ZTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**H * DO 90 J = 1, K DO 80 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - $ DCONJG( WORK( I, J ) ) 80 
CONTINUE 90 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILAZLR( N, K, V, LDV ) ) LASTC = ILAZLR( M, LASTV, C, LDC ) * * W := C * V = (C1*V1 + C2*V2) (stored in WORK) * * W := C2 * DO 100 J = 1, K CALL ZCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 100 CONTINUE * * W := W * V2 * CALL ZTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1 * CALL ZGEMM( 'No transpose', 'No transpose', $ LASTC, K, LASTV-K, $ ONE, C, LDC, V, LDV, ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL ZTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2**H * CALL ZTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( LASTV-K+1, 1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W * DO 120 J = 1, K DO 110 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) $ - WORK( I, J ) 110 CONTINUE 120 CONTINUE END IF END IF * ELSE IF( LSAME( STOREV, 'R' ) ) THEN * IF( LSAME( DIRECT, 'F' ) ) THEN * * Let V = ( V1 V2 ) (V1: first K columns) * where V1 is unit upper triangular. 
* IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILAZLC( K, M, V, LDV ) ) LASTC = ILAZLC( LASTV, N, C, LDC ) * * W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK) * * W := C1**H * DO 130 J = 1, K CALL ZCOPY( LASTC, C( J, 1 ), LDC, WORK( 1, J ), 1 ) CALL ZLACGV( LASTC, WORK( 1, J ), 1 ) 130 CONTINUE * * W := W * V1**H * CALL ZTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2**H*V2**H * CALL ZGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTC, K, LASTV-K, $ ONE, C( K+1, 1 ), LDC, V( 1, K+1 ), LDV, $ ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL ZTRMM( 'Right', 'Upper', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**H * W**H * IF( LASTV.GT.K ) THEN * * C2 := C2 - V2**H * W**H * CALL ZGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTV-K, LASTC, K, $ -ONE, V( 1, K+1 ), LDV, WORK, LDWORK, $ ONE, C( K+1, 1 ), LDC ) END IF * * W := W * V1 * CALL ZTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W**H * DO 150 J = 1, K DO 140 I = 1, LASTC C( J, I ) = C( J, I ) - DCONJG( WORK( I, J ) ) 140 CONTINUE 150 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILAZLC( K, N, V, LDV ) ) LASTC = ILAZLR( M, LASTV, C, LDC ) * * W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK) * * W := C1 * DO 160 J = 1, K CALL ZCOPY( LASTC, C( 1, J ), 1, WORK( 1, J ), 1 ) 160 CONTINUE * * W := W * V1**H * CALL ZTRMM( 'Right', 'Upper', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V, LDV, WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C2 * V2**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, K, LASTV-K, ONE, C( 1, K+1 ), LDC, $ V( 1, K+1 ), LDV, ONE, WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL ZTRMM( 'Right', 'Upper', TRANS, 'Non-unit', $ 
LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C2 := C2 - W * V2 * CALL ZGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, $ -ONE, WORK, LDWORK, V( 1, K+1 ), LDV, $ ONE, C( 1, K+1 ), LDC ) END IF * * W := W * V1 * CALL ZTRMM( 'Right', 'Upper', 'No transpose', 'Unit', $ LASTC, K, ONE, V, LDV, WORK, LDWORK ) * * C1 := C1 - W * DO 180 J = 1, K DO 170 I = 1, LASTC C( I, J ) = C( I, J ) - WORK( I, J ) 170 CONTINUE 180 CONTINUE * END IF * ELSE * * Let V = ( V1 V2 ) (V2: last K columns) * where V2 is unit lower triangular. * IF( LSAME( SIDE, 'L' ) ) THEN * * Form H * C or H**H * C where C = ( C1 ) * ( C2 ) * LASTV = MAX( K, ILAZLC( K, M, V, LDV ) ) LASTC = ILAZLC( LASTV, N, C, LDC ) * * W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK) * * W := C2**H * DO 190 J = 1, K CALL ZCOPY( LASTC, C( LASTV-K+J, 1 ), LDC, $ WORK( 1, J ), 1 ) CALL ZLACGV( LASTC, WORK( 1, J ), 1 ) 190 CONTINUE * * W := W * V2**H * CALL ZTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1**H * V1**H * CALL ZGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTC, K, LASTV-K, $ ONE, C, LDC, V, LDV, ONE, WORK, LDWORK ) END IF * * W := W * T**H or W * T * CALL ZTRMM( 'Right', 'Lower', TRANST, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - V**H * W**H * IF( LASTV.GT.K ) THEN * * C1 := C1 - V1**H * W**H * CALL ZGEMM( 'Conjugate transpose', $ 'Conjugate transpose', LASTV-K, LASTC, K, $ -ONE, V, LDV, WORK, LDWORK, ONE, C, LDC ) END IF * * W := W * V2 * CALL ZTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C2 := C2 - W**H * DO 210 J = 1, K DO 200 I = 1, LASTC C( LASTV-K+J, I ) = C( LASTV-K+J, I ) - $ DCONJG( WORK( I, J ) ) 200 CONTINUE 210 CONTINUE * ELSE IF( LSAME( SIDE, 'R' ) ) THEN * * Form C * H or C * H**H where C = ( C1 C2 ) * LASTV = MAX( K, ILAZLC( K, N, V, 
LDV ) ) LASTC = ILAZLR( M, LASTV, C, LDC ) * * W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK) * * W := C2 * DO 220 J = 1, K CALL ZCOPY( LASTC, C( 1, LASTV-K+J ), 1, $ WORK( 1, J ), 1 ) 220 CONTINUE * * W := W * V2**H * CALL ZTRMM( 'Right', 'Lower', 'Conjugate transpose', $ 'Unit', LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) IF( LASTV.GT.K ) THEN * * W := W + C1 * V1**H * CALL ZGEMM( 'No transpose', 'Conjugate transpose', $ LASTC, K, LASTV-K, ONE, C, LDC, V, LDV, ONE, $ WORK, LDWORK ) END IF * * W := W * T or W * T**H * CALL ZTRMM( 'Right', 'Lower', TRANS, 'Non-unit', $ LASTC, K, ONE, T, LDT, WORK, LDWORK ) * * C := C - W * V * IF( LASTV.GT.K ) THEN * * C1 := C1 - W * V1 * CALL ZGEMM( 'No transpose', 'No transpose', $ LASTC, LASTV-K, K, -ONE, WORK, LDWORK, V, LDV, $ ONE, C, LDC ) END IF * * W := W * V2 * CALL ZTRMM( 'Right', 'Lower', 'No transpose', 'Unit', $ LASTC, K, ONE, V( 1, LASTV-K+1 ), LDV, $ WORK, LDWORK ) * * C1 := C1 - W * DO 240 J = 1, K DO 230 I = 1, LASTC C( I, LASTV-K+J ) = C( I, LASTV-K+J ) $ - WORK( I, J ) 230 CONTINUE 240 CONTINUE * END IF * END IF END IF * RETURN * * End of ZLARFB * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zlarfg.f ================================================ *> \brief \b ZLARFG * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLARFG + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZLARFG( N, ALPHA, X, INCX, TAU ) * * .. Scalar Arguments .. * INTEGER INCX, N * COMPLEX*16 ALPHA, TAU * .. * .. Array Arguments .. * COMPLEX*16 X( * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLARFG generates a complex elementary reflector H of order n, such *> that *> *> H**H * ( alpha ) = ( beta ), H**H * H = I. 
*> ( x ) ( 0 ) *> *> where alpha and beta are scalars, with beta real, and x is an *> (n-1)-element complex vector. H is represented in the form *> *> H = I - tau * ( 1 ) * ( 1 v**H ) , *> ( v ) *> *> where tau is a complex scalar and v is a complex (n-1)-element *> vector. Note that H is not hermitian. *> *> If the elements of x are all zero and alpha is real, then tau = 0 *> and H is taken to be the unit matrix. *> *> Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 . *> \endverbatim * * Arguments: * ========== * *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the elementary reflector. *> \endverbatim *> *> \param[in,out] ALPHA *> \verbatim *> ALPHA is COMPLEX*16 *> On entry, the value alpha. *> On exit, it is overwritten with the value beta. *> \endverbatim *> *> \param[in,out] X *> \verbatim *> X is COMPLEX*16 array, dimension *> (1+(N-2)*abs(INCX)) *> On entry, the vector x. *> On exit, it is overwritten with the vector v. *> \endverbatim *> *> \param[in] INCX *> \verbatim *> INCX is INTEGER *> The increment between elements of X. INCX > 0. *> \endverbatim *> *> \param[out] TAU *> \verbatim *> TAU is COMPLEX*16 *> The value tau. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date November 2011 * *> \ingroup complex16OTHERauxiliary * * ===================================================================== SUBROUTINE ZLARFG( N, ALPHA, X, INCX, TAU ) * * -- LAPACK auxiliary routine (version 3.4.0) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * November 2011 * * .. Scalar Arguments .. INTEGER INCX, N COMPLEX*16 ALPHA, TAU * .. * .. Array Arguments .. COMPLEX*16 X( * ) * .. * * ===================================================================== * * .. Parameters .. 
DOUBLE PRECISION ONE, ZERO PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) * .. * .. Local Scalars .. INTEGER J, KNT DOUBLE PRECISION ALPHI, ALPHR, BETA, RSAFMN, SAFMIN, XNORM * .. * .. External Functions .. DOUBLE PRECISION DLAMCH, DLAPY3, DZNRM2 COMPLEX*16 ZLADIV EXTERNAL DLAMCH, DLAPY3, DZNRM2, ZLADIV * .. * .. Intrinsic Functions .. INTRINSIC ABS, DBLE, DCMPLX, DIMAG, SIGN * .. * .. External Subroutines .. EXTERNAL ZDSCAL, ZSCAL * .. * .. Executable Statements .. * IF( N.LE.0 ) THEN TAU = ZERO RETURN END IF * XNORM = DZNRM2( N-1, X, INCX ) ALPHR = DBLE( ALPHA ) ALPHI = DIMAG( ALPHA ) * IF( XNORM.EQ.ZERO .AND. ALPHI.EQ.ZERO ) THEN * * H = I * TAU = ZERO ELSE * * general case * BETA = -SIGN( DLAPY3( ALPHR, ALPHI, XNORM ), ALPHR ) SAFMIN = DLAMCH( 'S' ) / DLAMCH( 'E' ) RSAFMN = ONE / SAFMIN * KNT = 0 IF( ABS( BETA ).LT.SAFMIN ) THEN * * XNORM, BETA may be inaccurate; scale X and recompute them * 10 CONTINUE KNT = KNT + 1 CALL ZDSCAL( N-1, RSAFMN, X, INCX ) BETA = BETA*RSAFMN ALPHI = ALPHI*RSAFMN ALPHR = ALPHR*RSAFMN IF( ABS( BETA ).LT.SAFMIN ) $ GO TO 10 * * New BETA is at most 1, at least SAFMIN * XNORM = DZNRM2( N-1, X, INCX ) ALPHA = DCMPLX( ALPHR, ALPHI ) BETA = -SIGN( DLAPY3( ALPHR, ALPHI, XNORM ), ALPHR ) END IF TAU = DCMPLX( ( BETA-ALPHR ) / BETA, -ALPHI / BETA ) ALPHA = ZLADIV( DCMPLX( ONE ), ALPHA-BETA ) CALL ZSCAL( N-1, ALPHA, X, INCX ) * * If ALPHA is subnormal, it may lose relative accuracy * DO 20 J = 1, KNT BETA = BETA*SAFMIN 20 CONTINUE ALPHA = BETA END IF * RETURN * * End of ZLARFG * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/lapack/zlarft.f ================================================ *> \brief \b ZLARFT * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * *> \htmlonly *> Download ZLARFT + dependencies *> *> [TGZ] *> *> [ZIP] *> *> [TXT] *> \endhtmlonly * * Definition: * =========== * * SUBROUTINE ZLARFT( 
DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV * INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. * COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) * .. * * *> \par Purpose: * ============= *> *> \verbatim *> *> ZLARFT forms the triangular factor T of a complex block reflector H *> of order n, which is defined as a product of k elementary reflectors. *> *> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; *> *> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. *> *> If STOREV = 'C', the vector which defines the elementary reflector *> H(i) is stored in the i-th column of the array V, and *> *> H = I - V * T * V**H *> *> If STOREV = 'R', the vector which defines the elementary reflector *> H(i) is stored in the i-th row of the array V, and *> *> H = I - V**H * T * V *> \endverbatim * * Arguments: * ========== * *> \param[in] DIRECT *> \verbatim *> DIRECT is CHARACTER*1 *> Specifies the order in which the elementary reflectors are *> multiplied to form the block reflector: *> = 'F': H = H(1) H(2) . . . H(k) (Forward) *> = 'B': H = H(k) . . . H(2) H(1) (Backward) *> \endverbatim *> *> \param[in] STOREV *> \verbatim *> STOREV is CHARACTER*1 *> Specifies how the vectors which define the elementary *> reflectors are stored (see also Further Details): *> = 'C': columnwise *> = 'R': rowwise *> \endverbatim *> *> \param[in] N *> \verbatim *> N is INTEGER *> The order of the block reflector H. N >= 0. *> \endverbatim *> *> \param[in] K *> \verbatim *> K is INTEGER *> The order of the triangular factor T (= the number of *> elementary reflectors). K >= 1. *> \endverbatim *> *> \param[in] V *> \verbatim *> V is COMPLEX*16 array, dimension *> (LDV,K) if STOREV = 'C' *> (LDV,N) if STOREV = 'R' *> The matrix V. See further details. *> \endverbatim *> *> \param[in] LDV *> \verbatim *> LDV is INTEGER *> The leading dimension of the array V. 
*> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. *> \endverbatim *> *> \param[in] TAU *> \verbatim *> TAU is COMPLEX*16 array, dimension (K) *> TAU(i) must contain the scalar factor of the elementary *> reflector H(i). *> \endverbatim *> *> \param[out] T *> \verbatim *> T is COMPLEX*16 array, dimension (LDT,K) *> The k by k triangular factor T of the block reflector. *> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is *> lower triangular. The rest of the array is not used. *> \endverbatim *> *> \param[in] LDT *> \verbatim *> LDT is INTEGER *> The leading dimension of the array T. LDT >= K. *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date April 2012 * *> \ingroup complex16OTHERauxiliary * *> \par Further Details: * ===================== *> *> \verbatim *> *> The shape of the matrix V and the storage of the vectors which define *> the H(i) is best illustrated by the following example with n = 5 and *> k = 3. The elements equal to 1 are not stored. *> *> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': *> *> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) *> ( v1 1 ) ( 1 v2 v2 v2 ) *> ( v1 v2 1 ) ( 1 v3 v3 ) *> ( v1 v2 v3 ) *> ( v1 v2 v3 ) *> *> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': *> *> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) *> ( v1 v2 v3 ) ( v2 v2 v2 1 ) *> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) *> ( 1 v3 ) *> ( 1 ) *> \endverbatim *> * ===================================================================== SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * -- LAPACK auxiliary routine (version 3.4.1) -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * .. Scalar Arguments .. CHARACTER DIRECT, STOREV INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. 
COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) * .. * * ===================================================================== * * .. Parameters .. COMPLEX*16 ONE, ZERO PARAMETER ( ONE = ( 1.0D+0, 0.0D+0 ), $ ZERO = ( 0.0D+0, 0.0D+0 ) ) * .. * .. Local Scalars .. INTEGER I, J, PREVLASTV, LASTV * .. * .. External Subroutines .. EXTERNAL ZGEMV, ZLACGV, ZTRMV * .. * .. External Functions .. LOGICAL LSAME EXTERNAL LSAME * .. * .. Executable Statements .. * * Quick return if possible * IF( N.EQ.0 ) $ RETURN * IF( LSAME( DIRECT, 'F' ) ) THEN PREVLASTV = N DO I = 1, K PREVLASTV = MAX( PREVLASTV, I ) IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = 1, I T( J, I ) = ZERO END DO ELSE * * general case * IF( LSAME( STOREV, 'C' ) ) THEN * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) * CALL ZGEMV( 'Conjugate transpose', J-I, I-1, $ -TAU( I ), V( I+1, 1 ), LDV, $ V( I+1, I ), 1, ONE, T( 1, I ), 1 ) ELSE * Skip any trailing zeros. DO LASTV = N, I+1, -1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = 1, I-1 T( J, I ) = -TAU( I ) * V( J , I ) END DO J = MIN( LASTV, PREVLASTV ) * * T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H * CALL ZGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, $ ONE, T( 1, I ), LDT ) END IF * * T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) * CALL ZTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, $ LDT, T( 1, I ), 1 ) T( I, I ) = TAU( I ) IF( I.GT.1 ) THEN PREVLASTV = MAX( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF END DO ELSE PREVLASTV = 1 DO I = K, 1, -1 IF( TAU( I ).EQ.ZERO ) THEN * * H(i) = I * DO J = I, K T( J, I ) = ZERO END DO ELSE * * general case * IF( I.LT.K ) THEN IF( LSAME( STOREV, 'C' ) ) THEN * Skip any leading zeros. 
DO LASTV = 1, I-1 IF( V( LASTV, I ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) * CALL ZGEMV( 'Conjugate transpose', N-K+I-J, K-I, $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), $ 1, ONE, T( I+1, I ), 1 ) ELSE * Skip any leading zeros. DO LASTV = 1, I-1 IF( V( I, LASTV ).NE.ZERO ) EXIT END DO DO J = I+1, K T( J, I ) = -TAU( I ) * V( J, N-K+I ) END DO J = MAX( LASTV, PREVLASTV ) * * T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H * CALL ZGEMM( 'N', 'C', K-I, 1, N-K+I-J, -TAU( I ), $ V( I+1, J ), LDV, V( I, J ), LDV, $ ONE, T( I+1, I ), LDT ) END IF * * T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) * CALL ZTRMV( 'Lower', 'No transpose', 'Non-unit', K-I, $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) IF( I.GT.1 ) THEN PREVLASTV = MIN( PREVLASTV, LASTV ) ELSE PREVLASTV = LASTV END IF END IF T( I, I ) = TAU( I ) END IF END DO END IF RETURN * * End of ZLARFT * END ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/cdashtesting.cmake.in ================================================ set(CTEST_SOURCE_DIRECTORY "@CMAKE_SOURCE_DIR@") set(CTEST_BINARY_DIRECTORY "@CMAKE_BINARY_DIR@") set(CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@") set(CTEST_BUILD_NAME "@BUILDNAME@") set(CTEST_SITE "@SITE@") set(MODEL Experimental) if(${CTEST_SCRIPT_ARG} MATCHES Nightly) set(MODEL Nightly) elseif(${CTEST_SCRIPT_ARG} MATCHES Continuous) set(MODEL Continuous) endif() find_program(CTEST_GIT_COMMAND NAMES git) set(CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}") ctest_start(${MODEL} ${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY}) ctest_update(SOURCE "${CTEST_SOURCE_DIRECTORY}") ctest_submit(PARTS Update Notes) # to get CTEST_PROJECT_SUBPROJECTS definition: include("${CTEST_SOURCE_DIRECTORY}/CTestConfig.cmake") foreach(subproject ${CTEST_PROJECT_SUBPROJECTS}) message("") message("Process ${subproject}") 
set_property(GLOBAL PROPERTY SubProject ${subproject}) set_property(GLOBAL PROPERTY Label ${subproject}) ctest_configure(BUILD ${CTEST_BINARY_DIRECTORY} SOURCE ${CTEST_SOURCE_DIRECTORY} ) ctest_submit(PARTS Configure) set(CTEST_BUILD_TARGET "Build${subproject}") message("Build ${CTEST_BUILD_TARGET}") ctest_build(BUILD "${CTEST_BINARY_DIRECTORY}" APPEND) # builds target ${CTEST_BUILD_TARGET} ctest_submit(PARTS Build) ctest_test(BUILD "${CTEST_BINARY_DIRECTORY}" INCLUDE_LABEL "${subproject}" ) # runs only tests that have a LABELS property matching "${subproject}" ctest_coverage(BUILD "${CTEST_BINARY_DIRECTORY}" LABELS "${subproject}" ) ctest_submit(PARTS Test) endforeach() ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/check.in ================================================ #!/bin/bash # check : shorthand for make and ctest -R if [[ $# != 1 || $1 == *help ]] then echo "usage: $0 regexp" echo " Builds and runs tests matching the regexp." echo " The EIGEN_MAKE_ARGS environment variable allows to pass args to 'make'." echo " For example, to launch 5 concurrent builds, use EIGEN_MAKE_ARGS='-j5'" echo " The EIGEN_CTEST_ARGS environment variable allows to pass args to 'ctest'." echo " For example, with CTest 2.8, you can use EIGEN_CTEST_ARGS='-j5'." exit 0 fi if [ -n "${EIGEN_CTEST_ARGS:+x}" ] then ./buildtests.sh "$1" && ctest -R "$1" ${EIGEN_CTEST_ARGS} else ./buildtests.sh "$1" && ctest -R "$1" fi exit $? ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/debug.in ================================================ #!/bin/sh cmake -DCMAKE_BUILD_TYPE=Debug . 
// ================================================
// FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/eigen_gen_credits.cpp
// ================================================
// NOTE(review): the original #include lines lost their targets during text
// extraction; the set below covers every standard-library name this file uses.
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <map>
#include <list>

using namespace std;

// This function takes a line that may contain a name and/or email address,
// and returns just the name, while fixing the known "bad cases".
std::string contributor_name(const std::string& line)
{
  string result;

  // First take care of isolated email addresses (no real name on the line),
  // like "user@localhost.localdomain" entries, mapping them to known people.
  if(line.find("markb@localhost.localdomain") != string::npos)
    return "Mark Borgerding";
  if(line.find("kayhman@contact.intra.cea.fr") != string::npos)
    return "Guillaume Saupin";

  // From there on we assume an entry of the form either
  //   "Name <email>"  or just  "Name".
  size_t position_of_email_address = line.find_first_of('<');
  if(position_of_email_address != string::npos)
  {
    // There is an e-mail address in <...>.
    // Hauke once committed under a different name; normalize it.
    if(line.find("hauke.heibel") != string::npos)
      result = "Hauke Heibel";
    else
    {
      // just remove the e-mail address
      result = line.substr(0, position_of_email_address);
    }
  }
  else
  {
    // There is no e-mail address in <...>; automated commits get an
    // empty name, everything else passes through unchanged.
    if(line.find("convert-repo") != string::npos)
      result = "";
    else
      result = line;
  }

  // Remove trailing spaces left over after stripping the e-mail address.
  size_t length = result.length();
  while(length >= 1 && result[length-1] == ' ')
    result.erase(--length);

  return result;
}

// parses hg churn output to generate a contributors map.
// Parse "hg churn" output (one "<name> <count>" entry per line, read from
// `filename`) into a name -> count map, merging entries whose normalized
// name (via contributor_name) collides.
// NOTE(review): the template arguments below were stripped by text
// extraction and have been restored to map<string,int>.
map<string,int> contributors_map_from_churn_output(const char *filename)
{
  map<string,int> contributors_map;

  string line;
  ifstream churn_out;
  churn_out.open(filename, ios::in);
  // NOTE(review): looping on .eof() skips a final line that lacks a trailing
  // newline; kept as-is since hg churn always terminates its output lines.
  while(!getline(churn_out,line).eof())
  {
    // remove the histograms "******" that hg churn may draw at the end of some lines
    size_t first_star = line.find_first_of('*');
    if(first_star != string::npos) line.erase(first_star);

    // remove trailing spaces
    size_t length = line.length();
    while(length >= 1 && line[length-1] == ' ') line.erase(--length);

    // now the last space indicates where the number starts
    size_t last_space = line.find_last_of(' ');

    // get the number (of changesets or of modified lines for each contributor)
    int number;
    istringstream(line.substr(last_space+1)) >> number;

    // get the name of the contributor
    line.erase(last_space);
    string name = contributor_name(line);
    map<string,int>::iterator it = contributors_map.find(name);
    // if new contributor, insert
    if(it == contributors_map.end())
      contributors_map.insert(pair<string,int>(name, number));
    // if duplicate, just add the number
    else
      it->second += number;
  }
  churn_out.close();

  return contributors_map;
}

// find the last name, i.e. the last word.
// for "van den Schbling" types of last names, that's not a problem, that's actually what we want.
string lastname(const string& name) { size_t last_space = name.find_last_of(' '); if(last_space >= name.length()-1) return name; else return name.substr(last_space+1); } struct contributor { string name; int changedlines; int changesets; string url; string misc; contributor() : changedlines(0), changesets(0) {} bool operator < (const contributor& other) { return lastname(name).compare(lastname(other.name)) < 0; } }; void add_online_info_into_contributors_list(list& contributors_list, const char *filename) { string line; ifstream online_info; online_info.open(filename, ios::in); while(!getline(online_info,line).eof()) { string hgname, realname, url, misc; size_t last_bar = line.find_last_of('|'); if(last_bar == string::npos) continue; if(last_bar < line.length()) misc = line.substr(last_bar+1); line.erase(last_bar); last_bar = line.find_last_of('|'); if(last_bar == string::npos) continue; if(last_bar < line.length()) url = line.substr(last_bar+1); line.erase(last_bar); last_bar = line.find_last_of('|'); if(last_bar == string::npos) continue; if(last_bar < line.length()) realname = line.substr(last_bar+1); line.erase(last_bar); hgname = line; // remove the example line if(hgname.find("MercurialName") != string::npos) continue; list::iterator it; for(it=contributors_list.begin(); it != contributors_list.end() && it->name != hgname; ++it) {} if(it == contributors_list.end()) { contributor c; c.name = realname; c.url = url; c.misc = misc; contributors_list.push_back(c); } else { it->name = realname; it->url = url; it->misc = misc; } } } int main() { // parse the hg churn output files map contributors_map_for_changedlines = contributors_map_from_churn_output("churn-changedlines.out"); //map contributors_map_for_changesets = contributors_map_from_churn_output("churn-changesets.out"); // merge into the contributors list list contributors_list; map::iterator it; for(it=contributors_map_for_changedlines.begin(); it != contributors_map_for_changedlines.end(); ++it) { 
contributor c; c.name = it->first; c.changedlines = it->second; c.changesets = 0; //contributors_map_for_changesets.find(it->first)->second; contributors_list.push_back(c); } add_online_info_into_contributors_list(contributors_list, "online-info.out"); contributors_list.sort(); cout << "{| cellpadding=\"5\"\n"; cout << "!\n"; cout << "! Lines changed\n"; cout << "!\n"; list::iterator itc; int i = 0; for(itc=contributors_list.begin(); itc != contributors_list.end(); ++itc) { if(itc->name.length() == 0) continue; if(i%2) cout << "|-\n"; else cout << "|- style=\"background:#FFFFD0\"\n"; if(itc->url.length()) cout << "| [" << itc->url << " " << itc->name << "]\n"; else cout << "| " << itc->name << "\n"; if(itc->changedlines) cout << "| " << itc->changedlines << "\n"; else cout << "| (no information)\n"; cout << "| " << itc->misc << "\n"; i++; } cout << "|}" << endl; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/eigen_gen_docs ================================================ #!/bin/sh # configuration # You should call this script with USER set as you want, else some default # will be used USER=${USER:-'orzel'} UPLOAD_DIR=dox-devel #ulimit -v 1024000 # step 1 : build rm build/doc/html -Rf mkdir build -p (cd build && cmake .. 
&& make doc) || { echo "make failed"; exit 1; } #step 2 : upload # (the '/' at the end of path is very important, see rsync documentation) rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; } #step 3 : fix the perm ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR" || { echo "perm failed"; exit 1; } echo "Uploaded successfully" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/eigen_gen_split_test_help.cmake ================================================ #!cmake -P file(WRITE split_test_helper.h "") foreach(i RANGE 1 999) file(APPEND split_test_helper.h "#if defined(EIGEN_TEST_PART_${i}) || defined(EIGEN_TEST_PART_ALL)\n" "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n" "#else\n" "#define CALL_SUBTEST_${i}(FUNC)\n" "#endif\n\n" ) endforeach() ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/eigen_monitor_perf.sh ================================================ #!/bin/bash # This is a script example to automatically update and upload performance unit tests. # The following five variables must be adjusted to match your settings. 
USER='ggael' UPLOAD_DIR=perf_monitoring/ggaelmacbook26 EIGEN_SOURCE_PATH=$HOME/Eigen/eigen export PREFIX="haswell-fma" export CXX_FLAGS="-mfma -w" #### BENCH_PATH=$EIGEN_SOURCE_PATH/bench/perf_monitoring/$PREFIX PREVPATH=$(pwd) cd $EIGEN_SOURCE_PATH/bench/perf_monitoring && ./runall.sh "Haswell 2.6GHz, FMA, Apple's clang" "$@" cd $PREVPATH || exit 1 ALLFILES="$BENCH_PATH/*.png $BENCH_PATH/*.html $BENCH_PATH/index.html $BENCH_PATH/s1.js $BENCH_PATH/s2.js" # (the '/' at the end of path is very important, see rsync documentation) rsync -az --no-p --delete $ALLFILES $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; } # fix the perm ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/perf_monitoring" || { echo "perm failed"; exit 1; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/release.in ================================================ #!/bin/sh cmake -DCMAKE_BUILD_TYPE=Release . ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/scripts/relicense.py ================================================ # This file is part of Eigen, a lightweight C++ template library # for linear algebra. # # Copyright (C) 2012 Keir Mierle # # This Source Code Form is subject to the terms of the Mozilla # Public License v. 2.0. If a copy of the MPL was not distributed # with this file, You can obtain one at http://mozilla.org/MPL/2.0/. # # Author: mierle@gmail.com (Keir Mierle) # # Make the long-awaited conversion to MPL. lgpl3_header = ''' // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3 of the License, or (at your option) any later version. 
// // Alternatively, you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation; either version 2 of // the License, or (at your option) any later version. // // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the // GNU General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see . ''' mpl2_header = """ // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. """ import os import sys exclusions = set(['relicense.py']) def update(text): if text.find(lgpl3_header) == -1: return text, False return text.replace(lgpl3_header, mpl2_header), True rootdir = sys.argv[1] for root, sub_folders, files in os.walk(rootdir): for basename in files: if basename in exclusions: print 'SKIPPED', filename continue filename = os.path.join(root, basename) fo = file(filename) text = fo.read() fo.close() text, updated = update(text) if updated: fo = file(filename, 'w') fo.write(text) fo.close() print 'UPDATED', filename else: print ' ', filename ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/signature_of_eigen3_matrix_library ================================================ This file is just there as a signature to help identify directories containing Eigen3. When writing a script looking for Eigen3, just look for this file. This is especially useful to help disambiguate with Eigen2... 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/AnnoyingScalar.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2018 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TEST_ANNOYING_SCALAR_H #define EIGEN_TEST_ANNOYING_SCALAR_H #include #if EIGEN_COMP_GNUC #pragma GCC diagnostic ignored "-Wshadow" #endif #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW struct my_exception { my_exception() {} ~my_exception() {} }; #endif // An AnnoyingScalar is a pseudo scalar type that: // - can randomly through an exception in operator + // - randomly allocate on the heap or initialize a reference to itself making it non trivially copyable, nor movable, nor relocatable. 
class AnnoyingScalar { public: AnnoyingScalar() { init(); *v = 0; } AnnoyingScalar(long double _v) { init(); *v = _v; } AnnoyingScalar(double _v) { init(); *v = _v; } AnnoyingScalar(float _v) { init(); *v = _v; } AnnoyingScalar(int _v) { init(); *v = _v; } AnnoyingScalar(long _v) { init(); *v = _v; } #if EIGEN_HAS_CXX11 AnnoyingScalar(long long _v) { init(); *v = _v; } #endif AnnoyingScalar(const AnnoyingScalar& other) { init(); *v = *(other.v); } ~AnnoyingScalar() { if(v!=&data) delete v; instances--; } void init() { if(internal::random()) v = new float; else v = &data; instances++; } AnnoyingScalar operator+(const AnnoyingScalar& other) const { #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW countdown--; if(countdown<=0 && !dont_throw) throw my_exception(); #endif return AnnoyingScalar(*v+*other.v); } AnnoyingScalar operator-() const { return AnnoyingScalar(-*v); } AnnoyingScalar operator-(const AnnoyingScalar& other) const { return AnnoyingScalar(*v-*other.v); } AnnoyingScalar operator*(const AnnoyingScalar& other) const { return AnnoyingScalar((*v)*(*other.v)); } AnnoyingScalar operator/(const AnnoyingScalar& other) const { return AnnoyingScalar((*v)/(*other.v)); } AnnoyingScalar& operator+=(const AnnoyingScalar& other) { *v += *other.v; return *this; } AnnoyingScalar& operator-=(const AnnoyingScalar& other) { *v -= *other.v; return *this; } AnnoyingScalar& operator*=(const AnnoyingScalar& other) { *v *= *other.v; return *this; } AnnoyingScalar& operator/=(const AnnoyingScalar& other) { *v /= *other.v; return *this; } AnnoyingScalar& operator= (const AnnoyingScalar& other) { *v = *other.v; return *this; } bool operator==(const AnnoyingScalar& other) const { return *v == *other.v; } bool operator!=(const AnnoyingScalar& other) const { return *v != *other.v; } bool operator<=(const AnnoyingScalar& other) const { return *v <= *other.v; } bool operator< (const AnnoyingScalar& other) const { return *v < *other.v; } bool operator>=(const AnnoyingScalar& other) const { 
return *v >= *other.v; } bool operator> (const AnnoyingScalar& other) const { return *v > *other.v; } float* v; float data; static int instances; #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW static int countdown; static bool dont_throw; #endif }; AnnoyingScalar real(const AnnoyingScalar &x) { return x; } AnnoyingScalar imag(const AnnoyingScalar & ) { return 0; } AnnoyingScalar conj(const AnnoyingScalar &x) { return x; } AnnoyingScalar sqrt(const AnnoyingScalar &x) { return std::sqrt(*x.v); } AnnoyingScalar abs (const AnnoyingScalar &x) { return std::abs(*x.v); } AnnoyingScalar cos (const AnnoyingScalar &x) { return std::cos(*x.v); } AnnoyingScalar sin (const AnnoyingScalar &x) { return std::sin(*x.v); } AnnoyingScalar acos(const AnnoyingScalar &x) { return std::acos(*x.v); } AnnoyingScalar atan2(const AnnoyingScalar &y,const AnnoyingScalar &x) { return std::atan2(*y.v,*x.v); } std::ostream& operator<<(std::ostream& stream,const AnnoyingScalar& x) { stream << (*(x.v)); return stream; } int AnnoyingScalar::instances = 0; #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW int AnnoyingScalar::countdown = 0; bool AnnoyingScalar::dont_throw = false; #endif namespace Eigen { template<> struct NumTraits : NumTraits { enum { RequireInitialization = 1, }; typedef AnnoyingScalar Real; typedef AnnoyingScalar Nested; typedef AnnoyingScalar Literal; typedef AnnoyingScalar NonInteger; }; template<> inline AnnoyingScalar test_precision() { return test_precision(); } namespace numext { template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool (isfinite)(const AnnoyingScalar& x) { return (numext::isfinite)(*x.v); } } namespace internal { template<> EIGEN_STRONG_INLINE double cast(const AnnoyingScalar& x) { return double(*x.v); } template<> EIGEN_STRONG_INLINE float cast(const AnnoyingScalar& x) { return *x.v; } } } // namespace Eigen AnnoyingScalar get_test_precision(const AnnoyingScalar&) { return Eigen::test_precision(); } AnnoyingScalar test_relative_error(const AnnoyingScalar &a, const 
AnnoyingScalar &b) { return test_relative_error(*a.v, *b.v); } inline bool test_isApprox(const AnnoyingScalar &a, const AnnoyingScalar &b) { return internal::isApprox(*a.v, *b.v, test_precision()); } inline bool test_isMuchSmallerThan(const AnnoyingScalar &a, const AnnoyingScalar &b) { return test_isMuchSmallerThan(*a.v, *b.v); } #endif // EIGEN_TEST_ANNOYING_SCALAR_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/MovableScalar.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2020 Sebastien Boisvert // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_MOVABLE_SCALAR_H #define EIGEN_MISC_MOVABLE_SCALAR_H #include namespace Eigen { template > struct MovableScalar : public Base { MovableScalar() = default; ~MovableScalar() = default; MovableScalar(const MovableScalar&) = default; MovableScalar(MovableScalar&& other) = default; MovableScalar& operator=(const MovableScalar&) = default; MovableScalar& operator=(MovableScalar&& other) = default; MovableScalar(Scalar scalar) : Base(100, scalar) {} operator Scalar() const { return this->size() > 0 ? this->back() : Scalar(); } }; template<> struct NumTraits> : GenericNumTraits {}; } #endif ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/OffByOneScalar.h ================================================ // A Scalar with internal representation T+1 so that zero is internally // represented by T(1). This is used to test memory fill. 
// template class OffByOneScalar { public: OffByOneScalar() : val_(1) {} OffByOneScalar(const OffByOneScalar& other) { *this = other; } OffByOneScalar& operator=(const OffByOneScalar& other) { val_ = other.val_; return *this; } OffByOneScalar(T val) : val_(val + 1) {} OffByOneScalar& operator=(T val) { val_ = val + 1; } operator T() const { return val_ - 1; } private: T val_; }; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/SafeScalar.h ================================================ // A Scalar that asserts for uninitialized access. template class SafeScalar { public: SafeScalar() : initialized_(false) {} SafeScalar(const SafeScalar& other) { *this = other; } SafeScalar& operator=(const SafeScalar& other) { val_ = T(other); initialized_ = true; return *this; } SafeScalar(T val) : val_(val), initialized_(true) {} SafeScalar& operator=(T val) { val_ = val; initialized_ = true; } operator T() const { VERIFY(initialized_ && "Uninitialized access."); return val_; } private: T val_; bool initialized_; }; ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/adjoint.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" template struct adjoint_specific; template<> struct adjoint_specific { template static void run(const Vec& v1, const Vec& v2, Vec& v3, const Mat& square, Scalar s1, Scalar s2) { VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), numext::conj(s1) * v1.dot(v3) + numext::conj(s2) * v2.dot(v3), 0)); VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), 0)); // check compatibility of dot and adjoint VERIFY(test_isApproxWithRef(v1.dot(square * v2), (square.adjoint() * v1).dot(v2), 0)); } }; template<> struct adjoint_specific { template static void run(const Vec& v1, const Vec& v2, Vec& v3, const Mat& square, Scalar s1, Scalar s2) { typedef typename NumTraits::Real RealScalar; using std::abs; RealScalar ref = NumTraits::IsInteger ? RealScalar(0) : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm()); VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), numext::conj(s1) * v1.dot(v3) + numext::conj(s2) * v2.dot(v3), ref)); VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), ref)); VERIFY_IS_APPROX(v1.squaredNorm(), v1.norm() * v1.norm()); // check normalized() and normalize() VERIFY_IS_APPROX(v1, v1.norm() * v1.normalized()); v3 = v1; v3.normalize(); VERIFY_IS_APPROX(v1, v1.norm() * v3); VERIFY_IS_APPROX(v3, v1.normalized()); VERIFY_IS_APPROX(v3.norm(), RealScalar(1)); // check null inputs VERIFY_IS_APPROX((v1*0).normalized(), (v1*0)); #if (!EIGEN_ARCH_i386) || defined(EIGEN_VECTORIZE) RealScalar very_small = (std::numeric_limits::min)(); VERIFY( (v1*very_small).norm() == 0 ); VERIFY_IS_APPROX((v1*very_small).normalized(), (v1*very_small)); v3 = v1*very_small; v3.normalize(); VERIFY_IS_APPROX(v3, (v1*very_small)); #endif // check compatibility of dot and adjoint ref = NumTraits::IsInteger ? 
0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm())); VERIFY(internal::isMuchSmallerThan(abs(v1.dot(square * v2) - (square.adjoint() * v1).dot(v2)), ref, test_precision())); // check that Random().normalized() works: tricky as the random xpr must be evaluated by // normalized() in order to produce a consistent result. VERIFY_IS_APPROX(Vec::Random(v1.size()).normalized().norm(), RealScalar(1)); } }; template void adjoint(const MatrixType& m) { /* this test covers the following files: Transpose.h Conjugate.h Dot.h */ using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; typedef Matrix SquareMatrixType; const Index PacketSize = internal::packet_traits::size; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), square = SquareMatrixType::Random(rows, rows); VectorType v1 = VectorType::Random(rows), v2 = VectorType::Random(rows), v3 = VectorType::Random(rows), vzero = VectorType::Zero(rows); Scalar s1 = internal::random(), s2 = internal::random(); // check basic compatibility of adjoint, transpose, conjugate VERIFY_IS_APPROX(m1.transpose().conjugate().adjoint(), m1); VERIFY_IS_APPROX(m1.adjoint().conjugate().transpose(), m1); // check multiplicative behavior VERIFY_IS_APPROX((m1.adjoint() * m2).adjoint(), m2.adjoint() * m1); VERIFY_IS_APPROX((s1 * m1).adjoint(), numext::conj(s1) * m1.adjoint()); // check basic properties of dot, squaredNorm VERIFY_IS_APPROX(numext::conj(v1.dot(v2)), v2.dot(v1)); VERIFY_IS_APPROX(numext::real(v1.dot(v1)), v1.squaredNorm()); adjoint_specific::IsInteger>::run(v1, v2, v3, square, s1, s2); VERIFY_IS_MUCH_SMALLER_THAN(abs(vzero.dot(v1)), static_cast(1)); // like in testBasicStuff, test operator() to check const-qualification Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); 
VERIFY_IS_APPROX(m1.conjugate()(r,c), numext::conj(m1(r,c))); VERIFY_IS_APPROX(m1.adjoint()(c,r), numext::conj(m1(r,c))); // check inplace transpose m3 = m1; m3.transposeInPlace(); VERIFY_IS_APPROX(m3,m1.transpose()); m3.transposeInPlace(); VERIFY_IS_APPROX(m3,m1); if(PacketSize(0,m3.rows()-PacketSize); Index j = internal::random(0,m3.cols()-PacketSize); m3.template block(i,j).transposeInPlace(); VERIFY_IS_APPROX( (m3.template block(i,j)), (m1.template block(i,j).transpose()) ); m3.template block(i,j).transposeInPlace(); VERIFY_IS_APPROX(m3,m1); } // check inplace adjoint m3 = m1; m3.adjointInPlace(); VERIFY_IS_APPROX(m3,m1.adjoint()); m3.transposeInPlace(); VERIFY_IS_APPROX(m3,m1.conjugate()); // check mixed dot product typedef Matrix RealVectorType; RealVectorType rv1 = RealVectorType::Random(rows); VERIFY_IS_APPROX(v1.dot(rv1.template cast()), v1.dot(rv1)); VERIFY_IS_APPROX(rv1.template cast().dot(v1), rv1.dot(v1)); VERIFY( is_same_type(m1,m1.template conjugateIf()) ); VERIFY( is_same_type(m1.conjugate(),m1.template conjugateIf()) ); } template void adjoint_extra() { MatrixXcf a(10,10), b(10,10); VERIFY_RAISES_ASSERT(a = a.transpose()); VERIFY_RAISES_ASSERT(a = a.transpose() + b); VERIFY_RAISES_ASSERT(a = b + a.transpose()); VERIFY_RAISES_ASSERT(a = a.conjugate().transpose()); VERIFY_RAISES_ASSERT(a = a.adjoint()); VERIFY_RAISES_ASSERT(a = a.adjoint() + b); VERIFY_RAISES_ASSERT(a = b + a.adjoint()); // no assertion should be triggered for these cases: a.transpose() = a.transpose(); a.transpose() += a.transpose(); a.transpose() += a.transpose() + b; a.transpose() = a.adjoint(); a.transpose() += a.adjoint(); a.transpose() += a.adjoint() + b; // regression tests for check_for_aliasing MatrixXd c(10,10); c = 1.0 * MatrixXd::Ones(10,10) + c; c = MatrixXd::Ones(10,10) * 1.0 + c; c = c + MatrixXd::Ones(10,10) .cwiseProduct( MatrixXd::Zero(10,10) ); c = MatrixXd::Ones(10,10) * MatrixXd::Zero(10,10); // regression for bug 1646 for (int j = 0; j < 10; ++j) { 
c.col(j).head(j) = c.row(j).head(j); } for (int j = 0; j < 10; ++j) { c.col(j) = c.row(j); } a.conservativeResize(1,1); a = a.transpose(); a.conservativeResize(0,0); a = a.transpose(); } EIGEN_DECLARE_TEST(adjoint) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( adjoint(Matrix()) ); CALL_SUBTEST_2( adjoint(Matrix3d()) ); CALL_SUBTEST_3( adjoint(Matrix4f()) ); CALL_SUBTEST_4( adjoint(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_5( adjoint(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( adjoint(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); // Complement for 128 bits vectorization: CALL_SUBTEST_8( adjoint(Matrix2d()) ); CALL_SUBTEST_9( adjoint(Matrix()) ); // 256 bits vectorization: CALL_SUBTEST_10( adjoint(Matrix()) ); CALL_SUBTEST_11( adjoint(Matrix()) ); CALL_SUBTEST_12( adjoint(Matrix()) ); } // test a large static matrix only once CALL_SUBTEST_7( adjoint(Matrix()) ); CALL_SUBTEST_13( adjoint_extra<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/array_cwise.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" // Test the corner cases of pow(x, y) for real types. 
template void pow_test() { const Scalar zero = Scalar(0); const Scalar eps = Eigen::NumTraits::epsilon(); const Scalar one = Scalar(1); const Scalar two = Scalar(2); const Scalar three = Scalar(3); const Scalar sqrt_half = Scalar(std::sqrt(0.5)); const Scalar sqrt2 = Scalar(std::sqrt(2)); const Scalar inf = Eigen::NumTraits::infinity(); const Scalar nan = Eigen::NumTraits::quiet_NaN(); const Scalar denorm_min = std::numeric_limits::denorm_min(); const Scalar min = (std::numeric_limits::min)(); const Scalar max = (std::numeric_limits::max)(); const Scalar max_exp = (static_cast(int(Eigen::NumTraits::max_exponent())) * Scalar(EIGEN_LN2)) / eps; const static Scalar abs_vals[] = {zero, denorm_min, min, eps, sqrt_half, one, sqrt2, two, three, max_exp, max, inf, nan}; const int abs_cases = 13; const int num_cases = 2*abs_cases * 2*abs_cases; // Repeat the same value to make sure we hit the vectorized path. const int num_repeats = 32; Array x(num_repeats, num_cases); Array y(num_repeats, num_cases); int count = 0; for (int i = 0; i < abs_cases; ++i) { const Scalar abs_x = abs_vals[i]; for (int sign_x = 0; sign_x < 2; ++sign_x) { Scalar x_case = sign_x == 0 ? -abs_x : abs_x; for (int j = 0; j < abs_cases; ++j) { const Scalar abs_y = abs_vals[j]; for (int sign_y = 0; sign_y < 2; ++sign_y) { Scalar y_case = sign_y == 0 ? 
-abs_y : abs_y; for (int repeat = 0; repeat < num_repeats; ++repeat) { x(repeat, count) = x_case; y(repeat, count) = y_case; } ++count; } } } } Array actual = x.pow(y); const Scalar tol = test_precision(); bool all_pass = true; for (int i = 0; i < 1; ++i) { for (int j = 0; j < num_cases; ++j) { Scalar e = static_cast(std::pow(x(i,j), y(i,j))); Scalar a = actual(i, j); bool fail = !(a==e) && !internal::isApprox(a, e, tol) && !((numext::isnan)(a) && (numext::isnan)(e)); all_pass &= !fail; if (fail) { std::cout << "pow(" << x(i,j) << "," << y(i,j) << ") = " << a << " != " << e << std::endl; } } } VERIFY(all_pass); } template void array(const ArrayType& m) { typedef typename ArrayType::Scalar Scalar; typedef typename ArrayType::RealScalar RealScalar; typedef Array ColVectorType; typedef Array RowVectorType; Index rows = m.rows(); Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols), m2 = ArrayType::Random(rows, cols), m3(rows, cols); ArrayType m4 = m1; // copy constructor VERIFY_IS_APPROX(m1, m4); ColVectorType cv1 = ColVectorType::Random(rows); RowVectorType rv1 = RowVectorType::Random(cols); Scalar s1 = internal::random(), s2 = internal::random(); // scalar addition VERIFY_IS_APPROX(m1 + s1, s1 + m1); VERIFY_IS_APPROX(m1 + s1, ArrayType::Constant(rows,cols,s1) + m1); VERIFY_IS_APPROX(s1 - m1, (-m1)+s1 ); VERIFY_IS_APPROX(m1 - s1, m1 - ArrayType::Constant(rows,cols,s1)); VERIFY_IS_APPROX(s1 - m1, ArrayType::Constant(rows,cols,s1) - m1); VERIFY_IS_APPROX((m1*Scalar(2)) - s2, (m1+m1) - ArrayType::Constant(rows,cols,s2) ); m3 = m1; m3 += s2; VERIFY_IS_APPROX(m3, m1 + s2); m3 = m1; m3 -= s1; VERIFY_IS_APPROX(m3, m1 - s1); // scalar operators via Maps m3 = m1; ArrayType::Map(m1.data(), m1.rows(), m1.cols()) -= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 - m2); m3 = m1; ArrayType::Map(m1.data(), m1.rows(), m1.cols()) += ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 + m2); m3 = m1; 
ArrayType::Map(m1.data(), m1.rows(), m1.cols()) *= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 * m2); m3 = m1; m2 = ArrayType::Random(rows,cols); m2 = (m2==0).select(1,m2); ArrayType::Map(m1.data(), m1.rows(), m1.cols()) /= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); VERIFY_IS_APPROX(m1, m3 / m2); // reductions VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum()); VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum()); using std::abs; VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum()); VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum()); if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision())) VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum()); VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op())); // vector-wise ops m3 = m1; VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1); m3 = m1; VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1); m3 = m1; VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1); m3 = m1; VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1); // Conversion from scalar VERIFY_IS_APPROX((m3 = s1), ArrayType::Constant(rows,cols,s1)); VERIFY_IS_APPROX((m3 = 1), ArrayType::Constant(rows,cols,1)); VERIFY_IS_APPROX((m3.topLeftCorner(rows,cols) = 1), ArrayType::Constant(rows,cols,1)); typedef Array FixedArrayType; { FixedArrayType f1(s1); VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); FixedArrayType f2(numext::real(s1)); VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); FixedArrayType f3((int)100*numext::real(s1)); VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); f1.setRandom(); FixedArrayType f4(f1.data()); VERIFY_IS_APPROX(f4, f1); } #if EIGEN_HAS_CXX11 { FixedArrayType f1{s1}; VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); FixedArrayType f2{numext::real(s1)}; 
VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); FixedArrayType f3{(int)100*numext::real(s1)}; VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); f1.setRandom(); FixedArrayType f4{f1.data()}; VERIFY_IS_APPROX(f4, f1); } #endif // pow VERIFY_IS_APPROX(m1.pow(2), m1.square()); VERIFY_IS_APPROX(pow(m1,2), m1.square()); VERIFY_IS_APPROX(m1.pow(3), m1.cube()); VERIFY_IS_APPROX(pow(m1,3), m1.cube()); VERIFY_IS_APPROX((-m1).pow(3), -m1.cube()); VERIFY_IS_APPROX(pow(2*m1,3), 8*m1.cube()); ArrayType exponents = ArrayType::Constant(rows, cols, RealScalar(2)); VERIFY_IS_APPROX(Eigen::pow(m1,exponents), m1.square()); VERIFY_IS_APPROX(m1.pow(exponents), m1.square()); VERIFY_IS_APPROX(Eigen::pow(2*m1,exponents), 4*m1.square()); VERIFY_IS_APPROX((2*m1).pow(exponents), 4*m1.square()); VERIFY_IS_APPROX(Eigen::pow(m1,2*exponents), m1.square().square()); VERIFY_IS_APPROX(m1.pow(2*exponents), m1.square().square()); VERIFY_IS_APPROX(Eigen::pow(m1(0,0), exponents), ArrayType::Constant(rows,cols,m1(0,0)*m1(0,0))); // Check possible conflicts with 1D ctor typedef Array OneDArrayType; { OneDArrayType o1(rows); VERIFY(o1.size()==rows); OneDArrayType o2(static_cast(rows)); VERIFY(o2.size()==rows); } #if EIGEN_HAS_CXX11 { OneDArrayType o1{rows}; VERIFY(o1.size()==rows); OneDArrayType o4{int(rows)}; VERIFY(o4.size()==rows); } #endif // Check possible conflicts with 2D ctor typedef Array TwoDArrayType; typedef Array ArrayType2; { TwoDArrayType o1(rows,cols); VERIFY(o1.rows()==rows); VERIFY(o1.cols()==cols); TwoDArrayType o2(static_cast(rows),static_cast(cols)); VERIFY(o2.rows()==rows); VERIFY(o2.cols()==cols); ArrayType2 o3(rows,cols); VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); ArrayType2 o4(static_cast(rows),static_cast(cols)); VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); } #if EIGEN_HAS_CXX11 { TwoDArrayType o1{rows,cols}; VERIFY(o1.rows()==rows); VERIFY(o1.cols()==cols); TwoDArrayType o2{int(rows),int(cols)}; VERIFY(o2.rows()==rows); 
VERIFY(o2.cols()==cols); ArrayType2 o3{rows,cols}; VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); ArrayType2 o4{int(rows),int(cols)}; VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); } #endif } template void comparisons(const ArrayType& m) { using std::abs; typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; Index rows = m.rows(); Index cols = m.cols(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); ArrayType m1 = ArrayType::Random(rows, cols), m2 = ArrayType::Random(rows, cols), m3(rows, cols), m4 = m1; m4 = (m4.abs()==Scalar(0)).select(1,m4); VERIFY(((m1 + Scalar(1)) > m1).all()); VERIFY(((m1 - Scalar(1)) < m1).all()); if (rows*cols>1) { m3 = m1; m3(r,c) += 1; VERIFY(! (m1 < m3).all() ); VERIFY(! (m1 > m3).all() ); } VERIFY(!(m1 > m2 && m1 < m2).any()); VERIFY((m1 <= m2 || m1 >= m2).all()); // comparisons array to scalar VERIFY( (m1 != (m1(r,c)+1) ).any() ); VERIFY( (m1 > (m1(r,c)-1) ).any() ); VERIFY( (m1 < (m1(r,c)+1) ).any() ); VERIFY( (m1 == m1(r,c) ).any() ); // comparisons scalar to array VERIFY( ( (m1(r,c)+1) != m1).any() ); VERIFY( ( (m1(r,c)-1) < m1).any() ); VERIFY( ( (m1(r,c)+1) > m1).any() ); VERIFY( ( m1(r,c) == m1).any() ); // test Select VERIFY_IS_APPROX( (m1m2).select(m1,m2), m1.cwiseMax(m2) ); Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2); for (int j=0; j=ArrayType::Constant(rows,cols,mid)) .select(m1,0), m3); // even shorter version: VERIFY_IS_APPROX( (m1.abs()RealScalar(0.1)).count() == rows*cols); // and/or VERIFY( (m1RealScalar(0)).count() == 0); VERIFY( (m1=RealScalar(0)).count() == rows*cols); RealScalar a = m1.abs().mean(); VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count()); typedef Array ArrayOfIndices; // TODO allows colwise/rowwise for array VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose()); VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), 
ArrayOfIndices::Constant(rows, cols)); } template void array_real(const ArrayType& m) { using std::abs; using std::sqrt; typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; Index rows = m.rows(); Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols), m2 = ArrayType::Random(rows, cols), m3(rows, cols), m4 = m1; m4 = (m4.abs()==Scalar(0)).select(Scalar(1),m4); Scalar s1 = internal::random(); // these tests are mostly to check possible compilation issues with free-functions. VERIFY_IS_APPROX(m1.sin(), sin(m1)); VERIFY_IS_APPROX(m1.cos(), cos(m1)); VERIFY_IS_APPROX(m1.tan(), tan(m1)); VERIFY_IS_APPROX(m1.asin(), asin(m1)); VERIFY_IS_APPROX(m1.acos(), acos(m1)); VERIFY_IS_APPROX(m1.atan(), atan(m1)); VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); #if EIGEN_HAS_CXX11_MATH VERIFY_IS_APPROX(m1.tanh().atanh(), atanh(tanh(m1))); VERIFY_IS_APPROX(m1.sinh().asinh(), asinh(sinh(m1))); VERIFY_IS_APPROX(m1.cosh().acosh(), acosh(cosh(m1))); #endif VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); VERIFY_IS_APPROX(m1.arg(), arg(m1)); VERIFY_IS_APPROX(m1.round(), round(m1)); VERIFY_IS_APPROX(m1.rint(), rint(m1)); VERIFY_IS_APPROX(m1.floor(), floor(m1)); VERIFY_IS_APPROX(m1.ceil(), ceil(m1)); VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); VERIFY_IS_APPROX(m1.abs(), abs(m1)); VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); VERIFY_IS_APPROX(m1.square(), square(m1)); VERIFY_IS_APPROX(m1.cube(), cube(m1)); VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); VERIFY_IS_APPROX(m1.sign(), sign(m1)); VERIFY((m1.sqrt().sign().isNaN() == (Eigen::isnan)(sign(sqrt(m1)))).all()); // avoid inf and NaNs so verification doesn't fail m3 = m4.abs(); VERIFY_IS_APPROX(m3.sqrt(), sqrt(abs(m3))); 
VERIFY_IS_APPROX(m3.rsqrt(), Scalar(1)/sqrt(abs(m3))); VERIFY_IS_APPROX(rsqrt(m3), Scalar(1)/sqrt(abs(m3))); VERIFY_IS_APPROX(m3.log(), log(m3)); VERIFY_IS_APPROX(m3.log1p(), log1p(m3)); VERIFY_IS_APPROX(m3.log10(), log10(m3)); VERIFY_IS_APPROX(m3.log2(), log2(m3)); VERIFY((!(m1>m2) == (m1<=m2)).all()); VERIFY_IS_APPROX(sin(m1.asin()), m1); VERIFY_IS_APPROX(cos(m1.acos()), m1); VERIFY_IS_APPROX(tan(m1.atan()), m1); VERIFY_IS_APPROX(sinh(m1), Scalar(0.5)*(exp(m1)-exp(-m1))); VERIFY_IS_APPROX(cosh(m1), Scalar(0.5)*(exp(m1)+exp(-m1))); VERIFY_IS_APPROX(tanh(m1), (Scalar(0.5)*(exp(m1)-exp(-m1)))/(Scalar(0.5)*(exp(m1)+exp(-m1)))); VERIFY_IS_APPROX(logistic(m1), (Scalar(1)/(Scalar(1)+exp(-m1)))); VERIFY_IS_APPROX(arg(m1), ((m1())*Scalar(std::acos(Scalar(-1)))); VERIFY((round(m1) <= ceil(m1) && round(m1) >= floor(m1)).all()); VERIFY((rint(m1) <= ceil(m1) && rint(m1) >= floor(m1)).all()); VERIFY(((ceil(m1) - round(m1)) <= Scalar(0.5) || (round(m1) - floor(m1)) <= Scalar(0.5)).all()); VERIFY(((ceil(m1) - round(m1)) <= Scalar(1.0) && (round(m1) - floor(m1)) <= Scalar(1.0)).all()); VERIFY(((ceil(m1) - rint(m1)) <= Scalar(0.5) || (rint(m1) - floor(m1)) <= Scalar(0.5)).all()); VERIFY(((ceil(m1) - rint(m1)) <= Scalar(1.0) && (rint(m1) - floor(m1)) <= Scalar(1.0)).all()); VERIFY((Eigen::isnan)((m1*Scalar(0))/Scalar(0)).all()); VERIFY((Eigen::isinf)(m4/Scalar(0)).all()); VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*Scalar(0)/Scalar(0))) && (!(Eigen::isfinite)(m4/Scalar(0)))).all()); VERIFY_IS_APPROX(inverse(inverse(m4)),m4); VERIFY((abs(m1) == m1 || abs(m1) == -m1).all()); VERIFY_IS_APPROX(m3, sqrt(abs2(m3))); VERIFY_IS_APPROX(m1.absolute_difference(m2), (m1 > m2).select(m1 - m2, m2 - m1)); VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); VERIFY_IS_APPROX( m1*m1.sign(),m1.abs()); VERIFY_IS_APPROX(m1.sign() * m1.abs(), m1); VERIFY_IS_APPROX(numext::abs2(numext::real(m1)) + numext::abs2(numext::imag(m1)), numext::abs2(m1)); VERIFY_IS_APPROX(numext::abs2(Eigen::real(m1)) + 
numext::abs2(Eigen::imag(m1)), numext::abs2(m1)); if(!NumTraits::IsComplex) VERIFY_IS_APPROX(numext::real(m1), m1); // shift argument of logarithm so that it is not zero Scalar smallNumber = NumTraits::dummy_precision(); VERIFY_IS_APPROX((m3 + smallNumber).log() , log(abs(m3) + smallNumber)); VERIFY_IS_APPROX((m3 + smallNumber + Scalar(1)).log() , log1p(abs(m3) + smallNumber)); VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); VERIFY_IS_APPROX(m1.exp(), exp(m1)); VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); VERIFY_IS_APPROX((m3 + smallNumber).exp() - Scalar(1), expm1(abs(m3) + smallNumber)); VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt()); VERIFY_IS_APPROX(pow(m3,RealScalar(0.5)), m3.sqrt()); VERIFY_IS_APPROX(m3.pow(RealScalar(-0.5)), m3.rsqrt()); VERIFY_IS_APPROX(pow(m3,RealScalar(-0.5)), m3.rsqrt()); // Avoid inf and NaN. m3 = (m1.square()::epsilon()).select(Scalar(1),m3); VERIFY_IS_APPROX(m3.pow(RealScalar(-2)), m3.square().inverse()); pow_test(); VERIFY_IS_APPROX(log10(m3), log(m3)/numext::log(Scalar(10))); VERIFY_IS_APPROX(log2(m3), log(m3)/numext::log(Scalar(2))); // scalar by array division const RealScalar tiny = sqrt(std::numeric_limits::epsilon()); s1 += Scalar(tiny); m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); // check inplace transpose m3 = m1; m3.transposeInPlace(); VERIFY_IS_APPROX(m3, m1.transpose()); m3.transposeInPlace(); VERIFY_IS_APPROX(m3, m1); } template void array_complex(const ArrayType& m) { typedef typename ArrayType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; Index rows = m.rows(); Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols), m2(rows, cols), m4 = m1; m4.real() = (m4.real().abs()==RealScalar(0)).select(RealScalar(1),m4.real()); m4.imag() = (m4.imag().abs()==RealScalar(0)).select(RealScalar(1),m4.imag()); Array m3(rows, cols); for (Index i = 0; i < m.rows(); ++i) for (Index j = 0; j < m.cols(); 
++j) m2(i,j) = sqrt(m1(i,j)); // these tests are mostly to check possible compilation issues with free-functions. VERIFY_IS_APPROX(m1.sin(), sin(m1)); VERIFY_IS_APPROX(m1.cos(), cos(m1)); VERIFY_IS_APPROX(m1.tan(), tan(m1)); VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); VERIFY_IS_APPROX(m1.arg(), arg(m1)); VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); VERIFY_IS_APPROX(m1.log(), log(m1)); VERIFY_IS_APPROX(m1.log10(), log10(m1)); VERIFY_IS_APPROX(m1.log2(), log2(m1)); VERIFY_IS_APPROX(m1.abs(), abs(m1)); VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); VERIFY_IS_APPROX(m1.sqrt(), sqrt(m1)); VERIFY_IS_APPROX(m1.square(), square(m1)); VERIFY_IS_APPROX(m1.cube(), cube(m1)); VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); VERIFY_IS_APPROX(m1.sign(), sign(m1)); VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); VERIFY_IS_APPROX(m1.exp(), exp(m1)); VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); VERIFY_IS_APPROX(expm1(m1), exp(m1) - 1.); // Check for larger magnitude complex numbers that expm1 matches exp - 1. VERIFY_IS_APPROX(expm1(10. * m1), exp(10. 
* m1) - 1.); VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1))); VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1))); VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1)))); VERIFY_IS_APPROX(logistic(m1), (1.0/(1.0 + exp(-m1)))); for (Index i = 0; i < m.rows(); ++i) for (Index j = 0; j < m.cols(); ++j) m3(i,j) = std::atan2(m1(i,j).imag(), m1(i,j).real()); VERIFY_IS_APPROX(arg(m1), m3); std::complex zero(0.0,0.0); VERIFY((Eigen::isnan)(m1*zero/zero).all()); #if EIGEN_COMP_MSVC // msvc complex division is not robust VERIFY((Eigen::isinf)(m4/RealScalar(0)).all()); #else #if EIGEN_COMP_CLANG // clang's complex division is notoriously broken too if((numext::isinf)(m4(0,0)/RealScalar(0))) { #endif VERIFY((Eigen::isinf)(m4/zero).all()); #if EIGEN_COMP_CLANG } else { VERIFY((Eigen::isinf)(m4.real()/zero.real()).all()); } #endif #endif // MSVC VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*zero/zero)) && (!(Eigen::isfinite)(m1/zero))).all()); VERIFY_IS_APPROX(inverse(inverse(m4)),m4); VERIFY_IS_APPROX(conj(m1.conjugate()), m1); VERIFY_IS_APPROX(abs(m1), sqrt(square(m1.real())+square(m1.imag()))); VERIFY_IS_APPROX(abs(m1), sqrt(abs2(m1))); VERIFY_IS_APPROX(log10(m1), log(m1)/log(10)); VERIFY_IS_APPROX(log2(m1), log(m1)/log(2)); VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); VERIFY_IS_APPROX( m1.sign() * m1.abs(), m1); // scalar by array division Scalar s1 = internal::random(); const RealScalar tiny = std::sqrt(std::numeric_limits::epsilon()); s1 += Scalar(tiny); m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); // check inplace transpose m2 = m1; m2.transposeInPlace(); VERIFY_IS_APPROX(m2, m1.transpose()); m2.transposeInPlace(); VERIFY_IS_APPROX(m2, m1); // Check vectorized inplace transpose. 
ArrayType m5 = ArrayType::Random(131, 131); ArrayType m6 = m5; m6.transposeInPlace(); VERIFY_IS_APPROX(m6, m5.transpose()); } template void min_max(const ArrayType& m) { typedef typename ArrayType::Scalar Scalar; Index rows = m.rows(); Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols); // min/max with array Scalar maxM1 = m1.maxCoeff(); Scalar minM1 = m1.minCoeff(); VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)(ArrayType::Constant(rows,cols, minM1))); VERIFY_IS_APPROX(m1, (m1.min)(ArrayType::Constant(rows,cols, maxM1))); VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)(ArrayType::Constant(rows,cols, maxM1))); VERIFY_IS_APPROX(m1, (m1.max)(ArrayType::Constant(rows,cols, minM1))); // min/max with scalar input VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)( minM1)); VERIFY_IS_APPROX(m1, (m1.min)( maxM1)); VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)( maxM1)); VERIFY_IS_APPROX(m1, (m1.max)( minM1)); // min/max with various NaN propagation options. 
if (m1.size() > 1 && !NumTraits::IsInteger) { m1(0,0) = NumTraits::quiet_NaN(); maxM1 = m1.template maxCoeff(); minM1 = m1.template minCoeff(); VERIFY((numext::isnan)(maxM1)); VERIFY((numext::isnan)(minM1)); maxM1 = m1.template maxCoeff(); minM1 = m1.template minCoeff(); VERIFY(!(numext::isnan)(maxM1)); VERIFY(!(numext::isnan)(minM1)); } } template struct shift_left { template Scalar operator()(const Scalar& v) const { return v << N; } }; template struct arithmetic_shift_right { template Scalar operator()(const Scalar& v) const { return v >> N; } }; template void array_integer(const ArrayType& m) { Index rows = m.rows(); Index cols = m.cols(); ArrayType m1 = ArrayType::Random(rows, cols), m2(rows, cols); m2 = m1.template shiftLeft<2>(); VERIFY( (m2 == m1.unaryExpr(shift_left<2>())).all() ); m2 = m1.template shiftLeft<9>(); VERIFY( (m2 == m1.unaryExpr(shift_left<9>())).all() ); m2 = m1.template shiftRight<2>(); VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<2>())).all() ); m2 = m1.template shiftRight<9>(); VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<9>())).all() ); } EIGEN_DECLARE_TEST(array_cwise) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array(Array()) ); CALL_SUBTEST_2( array(Array22f()) ); CALL_SUBTEST_3( array(Array44d()) ); CALL_SUBTEST_4( array(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( array(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array(Array(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array_integer(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array_integer(Array(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 
0; i < g_repeat; i++) { CALL_SUBTEST_1( comparisons(Array()) ); CALL_SUBTEST_2( comparisons(Array22f()) ); CALL_SUBTEST_3( comparisons(Array44d()) ); CALL_SUBTEST_5( comparisons(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( comparisons(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( min_max(Array()) ); CALL_SUBTEST_2( min_max(Array22f()) ); CALL_SUBTEST_3( min_max(Array44d()) ); CALL_SUBTEST_5( min_max(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( min_max(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array_real(Array()) ); CALL_SUBTEST_2( array_real(Array22f()) ); CALL_SUBTEST_3( array_real(Array44d()) ); CALL_SUBTEST_5( array_real(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_7( array_real(Array()) ); CALL_SUBTEST_8( array_real(Array()) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_4( array_complex(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, int >::value)); VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, float >::value)); VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, ArrayBase >::value)); typedef CwiseUnaryOp, ArrayXd > Xpr; VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, ArrayBase >::value)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/array_for_matrix.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library 
// for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void array_for_matrix(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef Matrix ColVectorType; typedef Matrix RowVectorType; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols); ColVectorType cv1 = ColVectorType::Random(rows); RowVectorType rv1 = RowVectorType::Random(cols); Scalar s1 = internal::random(), s2 = internal::random(); // scalar addition VERIFY_IS_APPROX(m1.array() + s1, s1 + m1.array()); VERIFY_IS_APPROX((m1.array() + s1).matrix(), MatrixType::Constant(rows,cols,s1) + m1); VERIFY_IS_APPROX(((m1*Scalar(2)).array() - s2).matrix(), (m1+m1) - MatrixType::Constant(rows,cols,s2) ); m3 = m1; m3.array() += s2; VERIFY_IS_APPROX(m3, (m1.array() + s2).matrix()); m3 = m1; m3.array() -= s1; VERIFY_IS_APPROX(m3, (m1.array() - s1).matrix()); // reductions VERIFY_IS_MUCH_SMALLER_THAN(m1.colwise().sum().sum() - m1.sum(), m1.squaredNorm()); VERIFY_IS_MUCH_SMALLER_THAN(m1.rowwise().sum().sum() - m1.sum(), m1.squaredNorm()); VERIFY_IS_MUCH_SMALLER_THAN(m1.colwise().sum() + m2.colwise().sum() - (m1+m2).colwise().sum(), (m1+m2).squaredNorm()); VERIFY_IS_MUCH_SMALLER_THAN(m1.rowwise().sum() - m2.rowwise().sum() - (m1-m2).rowwise().sum(), (m1-m2).squaredNorm()); VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op())); // vector-wise ops m3 = m1; VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1); m3 = m1; VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1); m3 = m1; VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1); m3 = m1; VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1); // empty objects 
VERIFY_IS_APPROX((m1.template block<0,Dynamic>(0,0,0,cols).colwise().sum()), RowVectorType::Zero(cols)); VERIFY_IS_APPROX((m1.template block(0,0,rows,0).rowwise().sum()), ColVectorType::Zero(rows)); VERIFY_IS_APPROX((m1.template block<0,Dynamic>(0,0,0,cols).colwise().prod()), RowVectorType::Ones(cols)); VERIFY_IS_APPROX((m1.template block(0,0,rows,0).rowwise().prod()), ColVectorType::Ones(rows)); VERIFY_IS_APPROX(m1.block(0,0,0,cols).colwise().sum(), RowVectorType::Zero(cols)); VERIFY_IS_APPROX(m1.block(0,0,rows,0).rowwise().sum(), ColVectorType::Zero(rows)); VERIFY_IS_APPROX(m1.block(0,0,0,cols).colwise().prod(), RowVectorType::Ones(cols)); VERIFY_IS_APPROX(m1.block(0,0,rows,0).rowwise().prod(), ColVectorType::Ones(rows)); // verify the const accessors exist const Scalar& ref_m1 = m.matrix().array().coeffRef(0); const Scalar& ref_m2 = m.matrix().array().coeffRef(0,0); const Scalar& ref_a1 = m.array().matrix().coeffRef(0); const Scalar& ref_a2 = m.array().matrix().coeffRef(0,0); VERIFY(&ref_a1 == &ref_m1); VERIFY(&ref_a2 == &ref_m2); // Check write accessors: m1.array().coeffRef(0,0) = 1; VERIFY_IS_APPROX(m1(0,0),Scalar(1)); m1.array()(0,0) = 2; VERIFY_IS_APPROX(m1(0,0),Scalar(2)); m1.array().matrix().coeffRef(0,0) = 3; VERIFY_IS_APPROX(m1(0,0),Scalar(3)); m1.array().matrix()(0,0) = 4; VERIFY_IS_APPROX(m1(0,0),Scalar(4)); } template void comparisons(const MatrixType& m) { using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; Index rows = m.rows(); Index cols = m.cols(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols); VERIFY(((m1.array() + Scalar(1)) > m1.array()).all()); VERIFY(((m1.array() - Scalar(1)) < m1.array()).all()); if (rows*cols>1) { m3 = m1; m3(r,c) += 1; VERIFY(! (m1.array() < m3.array()).all() ); VERIFY(! 
(m1.array() > m3.array()).all() ); } // comparisons to scalar VERIFY( (m1.array() != (m1(r,c)+1) ).any() ); VERIFY( (m1.array() > (m1(r,c)-1) ).any() ); VERIFY( (m1.array() < (m1(r,c)+1) ).any() ); VERIFY( (m1.array() == m1(r,c) ).any() ); VERIFY( m1.cwiseEqual(m1(r,c)).any() ); // test Select VERIFY_IS_APPROX( (m1.array()m2.array()).select(m1,m2), m1.cwiseMax(m2) ); Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2); for (int j=0; j=MatrixType::Constant(rows,cols,mid).array()) .select(m1,0), m3); // even shorter version: VERIFY_IS_APPROX( (m1.array().abs()RealScalar(0.1)).count() == rows*cols); // and/or VERIFY( ((m1.array()RealScalar(0)).matrix()).count() == 0); VERIFY( ((m1.array()=RealScalar(0)).matrix()).count() == rows*cols); RealScalar a = m1.cwiseAbs().mean(); VERIFY( ((m1.array()<-a).matrix() || (m1.array()>a).matrix()).count() == (m1.cwiseAbs().array()>a).count()); typedef Matrix VectorOfIndices; // TODO allows colwise/rowwise for array VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().colwise().count(), VectorOfIndices::Constant(cols,rows).transpose()); VERIFY_IS_APPROX(((m1.array().abs()+1)>RealScalar(0.1)).matrix().rowwise().count(), VectorOfIndices::Constant(rows, cols)); } template void lpNorm(const VectorType& v) { using std::sqrt; typedef typename VectorType::RealScalar RealScalar; VectorType u = VectorType::Random(v.size()); if(v.size()==0) { VERIFY_IS_APPROX(u.template lpNorm(), RealScalar(0)); VERIFY_IS_APPROX(u.template lpNorm<1>(), RealScalar(0)); VERIFY_IS_APPROX(u.template lpNorm<2>(), RealScalar(0)); VERIFY_IS_APPROX(u.template lpNorm<5>(), RealScalar(0)); } else { VERIFY_IS_APPROX(u.template lpNorm(), u.cwiseAbs().maxCoeff()); } VERIFY_IS_APPROX(u.template lpNorm<1>(), u.cwiseAbs().sum()); VERIFY_IS_APPROX(u.template lpNorm<2>(), sqrt(u.array().abs().square().sum())); VERIFY_IS_APPROX(numext::pow(u.template lpNorm<5>(), typename VectorType::RealScalar(5)), u.array().abs().pow(5).sum()); } template 
void cwise_min_max(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols); // min/max with array Scalar maxM1 = m1.maxCoeff(); Scalar minM1 = m1.minCoeff(); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin(MatrixType::Constant(rows,cols, minM1))); VERIFY_IS_APPROX(m1, m1.cwiseMin(MatrixType::Constant(rows,cols, maxM1))); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax(MatrixType::Constant(rows,cols, maxM1))); VERIFY_IS_APPROX(m1, m1.cwiseMax(MatrixType::Constant(rows,cols, minM1))); // min/max with scalar input VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin( minM1)); VERIFY_IS_APPROX(m1, m1.cwiseMin(maxM1)); VERIFY_IS_APPROX(-m1, (-m1).cwiseMin(-minM1)); VERIFY_IS_APPROX(-m1.array(), ((-m1).array().min)( -minM1)); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax( maxM1)); VERIFY_IS_APPROX(m1, m1.cwiseMax(minM1)); VERIFY_IS_APPROX(-m1, (-m1).cwiseMax(-maxM1)); VERIFY_IS_APPROX(-m1.array(), ((-m1).array().max)(-maxM1)); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1).array(), (m1.array().min)( minM1)); VERIFY_IS_APPROX(m1.array(), (m1.array().min)( maxM1)); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1).array(), (m1.array().max)( maxM1)); VERIFY_IS_APPROX(m1.array(), (m1.array().max)( minM1)); // Test NaN propagation for min/max. if (!NumTraits::IsInteger) { m1(0,0) = NumTraits::quiet_NaN(); // Elementwise. 
VERIFY((numext::isnan)(m1.template cwiseMax(MatrixType::Constant(rows,cols, Scalar(1)))(0,0))); VERIFY((numext::isnan)(m1.template cwiseMin(MatrixType::Constant(rows,cols, Scalar(1)))(0,0))); VERIFY(!(numext::isnan)(m1.template cwiseMax(MatrixType::Constant(rows,cols, Scalar(1)))(0,0))); VERIFY(!(numext::isnan)(m1.template cwiseMin(MatrixType::Constant(rows,cols, Scalar(1)))(0,0))); VERIFY((numext::isnan)(m1.array().template max(MatrixType::Constant(rows,cols, Scalar(1)).array())(0,0))); VERIFY((numext::isnan)(m1.array().template min(MatrixType::Constant(rows,cols, Scalar(1)).array())(0,0))); VERIFY(!(numext::isnan)(m1.array().template max(MatrixType::Constant(rows,cols, Scalar(1)).array())(0,0))); VERIFY(!(numext::isnan)(m1.array().template min(MatrixType::Constant(rows,cols, Scalar(1)).array())(0,0))); // Reductions. VERIFY((numext::isnan)(m1.template maxCoeff())); VERIFY((numext::isnan)(m1.template minCoeff())); if (m1.size() > 1) { VERIFY(!(numext::isnan)(m1.template maxCoeff())); VERIFY(!(numext::isnan)(m1.template minCoeff())); } else { VERIFY((numext::isnan)(m1.template maxCoeff())); VERIFY((numext::isnan)(m1.template minCoeff())); } } } template void resize(const MatrixTraits& t) { typedef typename MatrixTraits::Scalar Scalar; typedef Matrix MatrixType; typedef Array Array2DType; typedef Matrix VectorType; typedef Array Array1DType; Index rows = t.rows(), cols = t.cols(); MatrixType m(rows,cols); VectorType v(rows); Array2DType a2(rows,cols); Array1DType a1(rows); m.array().resize(rows+1,cols+1); VERIFY(m.rows()==rows+1 && m.cols()==cols+1); a2.matrix().resize(rows+1,cols+1); VERIFY(a2.rows()==rows+1 && a2.cols()==cols+1); v.array().resize(cols); VERIFY(v.size()==cols); a1.matrix().resize(cols); VERIFY(a1.size()==cols); } template void regression_bug_654() { ArrayXf a = RowVectorXf(3); VectorXf v = Array(3); } // Check propagation of LvalueBit through Array/Matrix-Wrapper template void regrrssion_bug_1410() { const Matrix4i M; const Array4i A; ArrayWrapper 
MA = M.array(); MA.row(0); MatrixWrapper AM = A.matrix(); AM.row(0); VERIFY((internal::traits >::Flags&LvalueBit)==0); VERIFY((internal::traits >::Flags&LvalueBit)==0); VERIFY((internal::traits >::Flags&LvalueBit)==LvalueBit); VERIFY((internal::traits >::Flags&LvalueBit)==LvalueBit); } EIGEN_DECLARE_TEST(array_for_matrix) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array_for_matrix(Matrix()) ); CALL_SUBTEST_2( array_for_matrix(Matrix2f()) ); CALL_SUBTEST_3( array_for_matrix(Matrix4d()) ); CALL_SUBTEST_4( array_for_matrix(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( array_for_matrix(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( array_for_matrix(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( comparisons(Matrix()) ); CALL_SUBTEST_2( comparisons(Matrix2f()) ); CALL_SUBTEST_3( comparisons(Matrix4d()) ); CALL_SUBTEST_5( comparisons(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( comparisons(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( cwise_min_max(Matrix()) ); CALL_SUBTEST_2( cwise_min_max(Matrix2f()) ); CALL_SUBTEST_3( cwise_min_max(Matrix4d()) ); CALL_SUBTEST_5( cwise_min_max(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( cwise_min_max(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( lpNorm(Matrix()) ); CALL_SUBTEST_2( lpNorm(Vector2f()) ); CALL_SUBTEST_7( lpNorm(Vector3d()) ); CALL_SUBTEST_8( lpNorm(Vector4f()) ); CALL_SUBTEST_5( lpNorm(VectorXf(internal::random(1,EIGEN_TEST_MAX_SIZE))) ); 
CALL_SUBTEST_4( lpNorm(VectorXcf(internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } CALL_SUBTEST_5( lpNorm(VectorXf(0)) ); CALL_SUBTEST_4( lpNorm(VectorXcf(0)) ); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_4( resize(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( resize(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( resize(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } CALL_SUBTEST_6( regression_bug_654<0>() ); CALL_SUBTEST_6( regrrssion_bug_1410<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/array_of_string.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" EIGEN_DECLARE_TEST(array_of_string) { typedef Array ArrayXs; ArrayXs a1(3), a2(3), a3(3), a3ref(3); a1 << "one", "two", "three"; a2 << "1", "2", "3"; a3ref << "one (1)", "two (2)", "three (3)"; std::stringstream s1; s1 << a1; VERIFY_IS_EQUAL(s1.str(), std::string(" one two three")); a3 = a1 + std::string(" (") + a2 + std::string(")"); VERIFY((a3==a3ref).all()); a3 = a1; a3 += std::string(" (") + a2 + std::string(")"); VERIFY((a3==a3ref).all()); a1.swap(a3); VERIFY((a1==a3ref).all()); VERIFY((a3!=a3ref).all()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/array_replicate.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void replicate(const MatrixType& m) { /* this test covers the following files: Replicate.cpp */ typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; typedef Matrix MatrixX; typedef Matrix VectorX; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols); VectorType v1 = VectorType::Random(rows); MatrixX x1, x2; VectorX vx1; int f1 = internal::random(1,10), f2 = internal::random(1,10); x1.resize(rows*f1,cols*f2); for(int j=0; j())); x2.resize(rows,3*cols); x2 << m2, m2, m2; VERIFY_IS_APPROX(x2, (m2.template replicate<1,3>())); vx1.resize(3*rows,cols); vx1 << m2, m2, m2; VERIFY_IS_APPROX(vx1+vx1, vx1+(m2.template replicate<3,1>())); vx1=m2+(m2.colwise().replicate(1)); if(m2.cols()==1) VERIFY_IS_APPROX(m2.coeff(0), (m2.template replicate<3,1>().coeff(m2.rows()))); x2.resize(rows,f1); for (int j=0; j()) ); CALL_SUBTEST_2( replicate(Vector2f()) ); CALL_SUBTEST_3( replicate(Vector3d()) ); CALL_SUBTEST_4( replicate(Vector4f()) ); CALL_SUBTEST_5( replicate(VectorXf(16)) ); CALL_SUBTEST_6( replicate(VectorXcd(10)) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/array_reverse.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2009 Ricard Marxer // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include using namespace std; template void reverse(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; Index rows = m.rows(); Index cols = m.cols(); // this test relies a lot on Random.h, and there's not much more that we can do // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2; VectorType v1 = VectorType::Random(rows); MatrixType m1_r = m1.reverse(); // Verify that MatrixBase::reverse() works for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_r(i, j), m1(rows - 1 - i, cols - 1 - j)); } } Reverse m1_rd(m1); // Verify that a Reverse default (in both directions) of an expression works for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_rd(i, j), m1(rows - 1 - i, cols - 1 - j)); } } Reverse m1_rb(m1); // Verify that a Reverse in both directions of an expression works for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_rb(i, j), m1(rows - 1 - i, cols - 1 - j)); } } Reverse m1_rv(m1); // Verify that a Reverse in the vertical directions of an expression works for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_rv(i, j), m1(rows - 1 - i, j)); } } Reverse m1_rh(m1); // Verify that a Reverse in the horizontal directions of an expression works for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_rh(i, j), m1(i, cols - 1 - j)); } } VectorType v1_r = v1.reverse(); // Verify that a VectorType::reverse() of an expression works for ( int i = 0; i < rows; i++ ) { VERIFY_IS_APPROX(v1_r(i), v1(rows - 1 - i)); } MatrixType m1_cr = m1.colwise().reverse(); // Verify that PartialRedux::reverse() works (for colwise()) for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_cr(i, j), m1(rows - 1 - i, j)); } } MatrixType m1_rr = 
m1.rowwise().reverse(); // Verify that PartialRedux::reverse() works (for rowwise()) for ( int i = 0; i < rows; i++ ) { for ( int j = 0; j < cols; j++ ) { VERIFY_IS_APPROX(m1_rr(i, j), m1(i, cols - 1 - j)); } } Scalar x = internal::random(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); m1.reverse()(r, c) = x; VERIFY_IS_APPROX(x, m1(rows - 1 - r, cols - 1 - c)); m2 = m1; m2.reverseInPlace(); VERIFY_IS_APPROX(m2,m1.reverse().eval()); m2 = m1; m2.col(0).reverseInPlace(); VERIFY_IS_APPROX(m2.col(0),m1.col(0).reverse().eval()); m2 = m1; m2.row(0).reverseInPlace(); VERIFY_IS_APPROX(m2.row(0),m1.row(0).reverse().eval()); m2 = m1; m2.rowwise().reverseInPlace(); VERIFY_IS_APPROX(m2,m1.rowwise().reverse().eval()); m2 = m1; m2.colwise().reverseInPlace(); VERIFY_IS_APPROX(m2,m1.colwise().reverse().eval()); m1.colwise().reverse()(r, c) = x; VERIFY_IS_APPROX(x, m1(rows - 1 - r, c)); m1.rowwise().reverse()(r, c) = x; VERIFY_IS_APPROX(x, m1(r, cols - 1 - c)); } template void array_reverse_extra() { Vector4f x; x << 1, 2, 3, 4; Vector4f y; y << 4, 3, 2, 1; VERIFY(x.reverse()[1] == 3); VERIFY(x.reverse() == y); } // Simpler version of reverseInPlace leveraging a bug // in clang 6/7 with -O2 and AVX or AVX512 enabled. // This simpler version ensure that the clang bug is not simply hidden // through mis-inlining of reverseInPlace or other minor changes. 
template EIGEN_DONT_INLINE void bug1684_job1(MatrixType& m1, MatrixType& m2) { m2 = m1; m2.col(0).swap(m2.col(3)); m2.col(1).swap(m2.col(2)); } template EIGEN_DONT_INLINE void bug1684_job2(MatrixType& m1, MatrixType& m2) { m2 = m1; // load m1/m2 in AVX registers m1.col(0) = m2.col(3); // perform 128 bits moves m1.col(1) = m2.col(2); m1.col(2) = m2.col(1); m1.col(3) = m2.col(0); } template EIGEN_DONT_INLINE void bug1684_job3(MatrixType& m1, MatrixType& m2) { m2 = m1; Vector4f tmp; tmp = m2.col(0); m2.col(0) = m2.col(3); m2.col(3) = tmp; tmp = m2.col(1); m2.col(1) = m2.col(2); m2.col(2) = tmp; } template void bug1684() { Matrix4f m1 = Matrix4f::Random(); Matrix4f m2 = Matrix4f::Random(); bug1684_job1(m1,m2); VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); bug1684_job2(m1,m2); VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); // This one still fail after our swap's workaround, // but I expect users not to implement their own swap. // bug1684_job3(m1,m2); // VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); } EIGEN_DECLARE_TEST(array_reverse) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( reverse(Matrix()) ); CALL_SUBTEST_2( reverse(Matrix2f()) ); CALL_SUBTEST_3( reverse(Matrix4f()) ); CALL_SUBTEST_4( reverse(Matrix4d()) ); CALL_SUBTEST_5( reverse(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( reverse(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_7( reverse(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( reverse(Matrix()) ); CALL_SUBTEST_9( reverse(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_3( bug1684<0>() ); } CALL_SUBTEST_3( array_reverse_extra<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bandmatrix.cpp 
================================================ // This file is triangularView of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void bandmatrix(const MatrixType& _m) { typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix DenseMatrixType; Index rows = _m.rows(); Index cols = _m.cols(); Index supers = _m.supers(); Index subs = _m.subs(); MatrixType m(rows,cols,supers,subs); DenseMatrixType dm1(rows,cols); dm1.setZero(); m.diagonal().setConstant(123); dm1.diagonal().setConstant(123); for (int i=1; i<=m.supers();++i) { m.diagonal(i).setConstant(static_cast(i)); dm1.diagonal(i).setConstant(static_cast(i)); } for (int i=1; i<=m.subs();++i) { m.diagonal(-i).setConstant(-static_cast(i)); dm1.diagonal(-i).setConstant(-static_cast(i)); } //std::cerr << m.m_data << "\n\n" << m.toDense() << "\n\n" << dm1 << "\n\n\n\n"; VERIFY_IS_APPROX(dm1,m.toDenseMatrix()); for (int i=0; i(i+1)); dm1.col(i).setConstant(static_cast(i+1)); } Index d = (std::min)(rows,cols); Index a = std::max(0,cols-d-supers); Index b = std::max(0,rows-d-subs); if(a>0) dm1.block(0,d+supers,rows,a).setZero(); dm1.block(0,supers+1,cols-supers-1-a,cols-supers-1-a).template triangularView().setZero(); dm1.block(subs+1,0,rows-subs-1-b,rows-subs-1-b).template triangularView().setZero(); if(b>0) dm1.block(d+subs,0,b,cols).setZero(); //std::cerr << m.m_data << "\n\n" << m.toDense() << "\n\n" << dm1 << "\n\n"; VERIFY_IS_APPROX(dm1,m.toDenseMatrix()); } using Eigen::internal::BandMatrix; EIGEN_DECLARE_TEST(bandmatrix) { for(int i = 0; i < 10*g_repeat ; i++) { Index rows = internal::random(1,10); Index cols = internal::random(1,10); Index sups = internal::random(0,cols-1); Index 
subs = internal::random(0,rows-1); CALL_SUBTEST(bandmatrix(BandMatrix(rows,cols,sups,subs)) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/basicstuff.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include "random_without_cast_overflow.h" template typename internal::enable_if<(MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1),void>::type check_index(const MatrixType& m) { VERIFY_RAISES_ASSERT(m[0]); VERIFY_RAISES_ASSERT((m+m)[0]); } template typename internal::enable_if::type check_index(const MatrixType& /*unused*/) {} template void basicStuff(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; typedef Matrix SquareMatrixType; Index rows = m.rows(); Index cols = m.cols(); // this test relies a lot on Random.h, and there's not much more that we can do // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), mzero = MatrixType::Zero(rows, cols), square = Matrix::Random(rows, rows); VectorType v1 = VectorType::Random(rows), vzero = VectorType::Zero(rows); SquareMatrixType sm1 = SquareMatrixType::Random(rows,rows), sm2(rows,rows); Scalar x = 0; while(x == Scalar(0)) x = internal::random(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); m1.coeffRef(r,c) = x; VERIFY_IS_APPROX(x, m1.coeff(r,c)); m1(r,c) = x; VERIFY_IS_APPROX(x, m1(r,c)); v1.coeffRef(r) = x; VERIFY_IS_APPROX(x, v1.coeff(r)); v1(r) = x; VERIFY_IS_APPROX(x, v1(r)); v1[r] = x; 
VERIFY_IS_APPROX(x, v1[r]); // test fetching with various index types. Index r1 = internal::random(0, numext::mini(Index(127),rows-1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); x = v1(static_cast(r1)); #if EIGEN_HAS_CXX11 x = v1(static_cast(r1)); x = v1(static_cast(r1)); #endif VERIFY_IS_APPROX( v1, v1); VERIFY_IS_NOT_APPROX( v1, 2*v1); VERIFY_IS_MUCH_SMALLER_THAN( vzero, v1); VERIFY_IS_MUCH_SMALLER_THAN( vzero, v1.squaredNorm()); VERIFY_IS_NOT_MUCH_SMALLER_THAN(v1, v1); VERIFY_IS_APPROX( vzero, v1-v1); VERIFY_IS_APPROX( m1, m1); VERIFY_IS_NOT_APPROX( m1, 2*m1); VERIFY_IS_MUCH_SMALLER_THAN( mzero, m1); VERIFY_IS_NOT_MUCH_SMALLER_THAN(m1, m1); VERIFY_IS_APPROX( mzero, m1-m1); // always test operator() on each read-only expression class, // in order to check const-qualifiers. // indeed, if an expression class (here Zero) is meant to be read-only, // hence has no _write() method, the corresponding MatrixBase method (here zero()) // should return a const-qualified object so that it is the const-qualified // operator() that gets called, which in turn calls _read(). VERIFY_IS_MUCH_SMALLER_THAN(MatrixType::Zero(rows,cols)(r,c), static_cast(1)); // now test copying a row-vector into a (column-)vector and conversely. 
square.col(r) = square.row(r).eval(); Matrix rv(rows); Matrix cv(rows); rv = square.row(r); cv = square.col(r); VERIFY_IS_APPROX(rv, cv.transpose()); if(cols!=1 && rows!=1 && MatrixType::SizeAtCompileTime!=Dynamic) { VERIFY_RAISES_ASSERT(m1 = (m2.block(0,0, rows-1, cols-1))); } if(cols!=1 && rows!=1) { check_index(m1); } VERIFY_IS_APPROX(m3 = m1,m1); MatrixType m4; VERIFY_IS_APPROX(m4 = m1,m1); m3.real() = m1.real(); VERIFY_IS_APPROX(static_cast(m3).real(), static_cast(m1).real()); VERIFY_IS_APPROX(static_cast(m3).real(), m1.real()); // check == / != operators VERIFY(m1==m1); VERIFY(m1!=m2); VERIFY(!(m1==m2)); VERIFY(!(m1!=m1)); m1 = m2; VERIFY(m1==m2); VERIFY(!(m1!=m2)); // check automatic transposition sm2.setZero(); for(Index i=0;i(0,10)>5; m3 = b ? m1 : m2; if(b) VERIFY_IS_APPROX(m3,m1); else VERIFY_IS_APPROX(m3,m2); m3 = b ? -m1 : m2; if(b) VERIFY_IS_APPROX(m3,-m1); else VERIFY_IS_APPROX(m3,m2); m3 = b ? m1 : -m2; if(b) VERIFY_IS_APPROX(m3,m1); else VERIFY_IS_APPROX(m3,-m2); } } template void basicStuffComplex(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix RealMatrixType; Index rows = m.rows(); Index cols = m.cols(); Scalar s1 = internal::random(), s2 = internal::random(); VERIFY(numext::real(s1)==numext::real_ref(s1)); VERIFY(numext::imag(s1)==numext::imag_ref(s1)); numext::real_ref(s1) = numext::real(s2); numext::imag_ref(s1) = numext::imag(s2); VERIFY(internal::isApprox(s1, s2, NumTraits::epsilon())); // extended precision in Intel FPUs means that s1 == s2 in the line above is not guaranteed. 
RealMatrixType rm1 = RealMatrixType::Random(rows,cols), rm2 = RealMatrixType::Random(rows,cols); MatrixType cm(rows,cols); cm.real() = rm1; cm.imag() = rm2; VERIFY_IS_APPROX(static_cast(cm).real(), rm1); VERIFY_IS_APPROX(static_cast(cm).imag(), rm2); rm1.setZero(); rm2.setZero(); rm1 = cm.real(); rm2 = cm.imag(); VERIFY_IS_APPROX(static_cast(cm).real(), rm1); VERIFY_IS_APPROX(static_cast(cm).imag(), rm2); cm.real().setZero(); VERIFY(static_cast(cm).real().isZero()); VERIFY(!static_cast(cm).imag().isZero()); } template struct casting_test { static void run() { Matrix m; for (int i=0; i::value(); } } Matrix n = m.template cast(); for (int i=0; i(m(i, j)))); } } } }; template struct casting_test_runner { static void run() { casting_test::run(); casting_test::run(); casting_test::run(); casting_test::run(); casting_test::run(); casting_test::run(); casting_test::run(); #if EIGEN_HAS_CXX11 casting_test::run(); casting_test::run(); #endif casting_test::run(); casting_test::run(); casting_test::run(); casting_test::run(); casting_test >::run(); casting_test >::run(); } }; template struct casting_test_runner::IsComplex)>::type> { static void run() { // Only a few casts from std::complex are defined. 
casting_test::run(); casting_test::run(); casting_test >::run(); casting_test >::run(); } }; void casting_all() { casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); #if EIGEN_HAS_CXX11 casting_test_runner::run(); casting_test_runner::run(); #endif casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner::run(); casting_test_runner >::run(); casting_test_runner >::run(); } template void fixedSizeMatrixConstruction() { Scalar raw[4]; for(int k=0; k<4; ++k) raw[k] = internal::random(); { Matrix m(raw); Array a(raw); for(int k=0; k<4; ++k) VERIFY(m(k) == raw[k]); for(int k=0; k<4; ++k) VERIFY(a(k) == raw[k]); VERIFY_IS_EQUAL(m,(Matrix(raw[0],raw[1],raw[2],raw[3]))); VERIFY((a==(Array(raw[0],raw[1],raw[2],raw[3]))).all()); } { Matrix m(raw); Array a(raw); for(int k=0; k<3; ++k) VERIFY(m(k) == raw[k]); for(int k=0; k<3; ++k) VERIFY(a(k) == raw[k]); VERIFY_IS_EQUAL(m,(Matrix(raw[0],raw[1],raw[2]))); VERIFY((a==Array(raw[0],raw[1],raw[2])).all()); } { Matrix m(raw), m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ); Array a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ); for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]); for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]); VERIFY_IS_EQUAL(m,(Matrix(raw[0],raw[1]))); VERIFY((a==Array(raw[0],raw[1])).all()); for(int k=0; k<2; ++k) VERIFY(m2(k) == DenseIndex(raw[k])); for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k])); } { Matrix m(raw), m2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ), m3( (int(raw[0])), (int(raw[1])) ), m4( (float(raw[0])), (float(raw[1])) ); Array a(raw), a2( (DenseIndex(raw[0])), (DenseIndex(raw[1])) ); for(int k=0; k<2; ++k) VERIFY(m(k) == raw[k]); for(int k=0; k<2; ++k) VERIFY(a(k) == raw[k]); VERIFY_IS_EQUAL(m,(Matrix(raw[0],raw[1]))); VERIFY((a==Array(raw[0],raw[1])).all()); for(int k=0; k<2; ++k) 
VERIFY(m2(k) == DenseIndex(raw[k])); for(int k=0; k<2; ++k) VERIFY(a2(k) == DenseIndex(raw[k])); for(int k=0; k<2; ++k) VERIFY(m3(k) == int(raw[k])); for(int k=0; k<2; ++k) VERIFY((m4(k)) == Scalar(float(raw[k]))); } { Matrix m(raw), m1(raw[0]), m2( (DenseIndex(raw[0])) ), m3( (int(raw[0])) ); Array a(raw), a1(raw[0]), a2( (DenseIndex(raw[0])) ); VERIFY(m(0) == raw[0]); VERIFY(a(0) == raw[0]); VERIFY(m1(0) == raw[0]); VERIFY(a1(0) == raw[0]); VERIFY(m2(0) == DenseIndex(raw[0])); VERIFY(a2(0) == DenseIndex(raw[0])); VERIFY(m3(0) == int(raw[0])); VERIFY_IS_EQUAL(m,(Matrix(raw[0]))); VERIFY((a==Array(raw[0])).all()); } } EIGEN_DECLARE_TEST(basicstuff) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( basicStuff(Matrix()) ); CALL_SUBTEST_2( basicStuff(Matrix4d()) ); CALL_SUBTEST_3( basicStuff(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_4( basicStuff(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( basicStuff(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( basicStuff(Matrix()) ); CALL_SUBTEST_7( basicStuff(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( casting_all() ); CALL_SUBTEST_3( basicStuffComplex(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_5( basicStuffComplex(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } CALL_SUBTEST_1(fixedSizeMatrixConstruction()); CALL_SUBTEST_1(fixedSizeMatrixConstruction()); CALL_SUBTEST_1(fixedSizeMatrixConstruction()); CALL_SUBTEST_1(fixedSizeMatrixConstruction()); CALL_SUBTEST_1(fixedSizeMatrixConstruction()); CALL_SUBTEST_1(fixedSizeMatrixConstruction()); } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/test/bdcsvd.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2013 Gauthier Brun // Copyright (C) 2013 Nicolas Carre // Copyright (C) 2013 Jean Ceccato // Copyright (C) 2013 Pierre Zoppitelli // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/ // discard stack allocation as that too bypasses malloc #define EIGEN_STACK_ALLOCATION_LIMIT 0 #define EIGEN_RUNTIME_NO_MALLOC #include "main.h" #include #include #include #define SVD_DEFAULT(M) BDCSVD #define SVD_FOR_MIN_NORM(M) BDCSVD #include "svd_common.h" // Check all variants of JacobiSVD template void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true) { MatrixType m; if(pickrandom) { m.resizeLike(a); svd_fill_random(m); } else m = a; CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); } template void bdcsvd_method() { enum { Size = MatrixType::RowsAtCompileTime }; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix RealVecType; MatrixType m = MatrixType::Identity(); VERIFY_IS_APPROX(m.bdcSvd().singularValues(), RealVecType::Ones()); VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU()); VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV()); VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m); VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } // compare the Singular values returned with Jacobi and Bdc template void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0) { MatrixType m = MatrixType::Random(a.rows(), a.cols()); BDCSVD bdc_svd(m); JacobiSVD jacobi_svd(m); VERIFY_IS_APPROX(bdc_svd.singularValues(), 
jacobi_svd.singularValues()); if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); } EIGEN_DECLARE_TEST(bdcsvd) { CALL_SUBTEST_3(( svd_verify_assert >(Matrix3f()) )); CALL_SUBTEST_4(( svd_verify_assert >(Matrix4d()) )); CALL_SUBTEST_7(( svd_verify_assert >(MatrixXf(10,12)) )); CALL_SUBTEST_8(( svd_verify_assert >(MatrixXcd(7,5)) )); CALL_SUBTEST_101(( svd_all_trivial_2x2(bdcsvd) )); CALL_SUBTEST_102(( svd_all_trivial_2x2(bdcsvd) )); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_3(( bdcsvd() )); CALL_SUBTEST_4(( bdcsvd() )); CALL_SUBTEST_5(( bdcsvd >() )); int r = internal::random(1, EIGEN_TEST_MAX_SIZE/2), c = internal::random(1, EIGEN_TEST_MAX_SIZE/2); TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(c) CALL_SUBTEST_6(( bdcsvd(Matrix(r,2)) )); CALL_SUBTEST_7(( bdcsvd(MatrixXf(r,c)) )); CALL_SUBTEST_7(( compare_bdc_jacobi(MatrixXf(r,c)) )); CALL_SUBTEST_10(( bdcsvd(MatrixXd(r,c)) )); CALL_SUBTEST_10(( compare_bdc_jacobi(MatrixXd(r,c)) )); CALL_SUBTEST_8(( bdcsvd(MatrixXcd(r,c)) )); CALL_SUBTEST_8(( compare_bdc_jacobi(MatrixXcd(r,c)) )); // Test on inf/nan matrix CALL_SUBTEST_7( (svd_inf_nan, MatrixXf>()) ); CALL_SUBTEST_10( (svd_inf_nan, MatrixXd>()) ); } // test matrixbase method CALL_SUBTEST_1(( bdcsvd_method() )); CALL_SUBTEST_3(( bdcsvd_method() )); // Test problem size constructors CALL_SUBTEST_7( BDCSVD(10,10) ); // Check that preallocation avoids subsequent mallocs // Disabled because not supported by BDCSVD // CALL_SUBTEST_9( svd_preallocate() ); CALL_SUBTEST_2( svd_underoverflow() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bfloat16_float.cpp 
================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include #include #include "main.h" #include #define VERIFY_BFLOAT16_BITS_EQUAL(h, bits) \ VERIFY_IS_EQUAL((numext::bit_cast(h)), (static_cast(bits))) // Make sure it's possible to forward declare Eigen::bfloat16 namespace Eigen { struct bfloat16; } using Eigen::bfloat16; float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa, uint32_t low_mantissa) { float dest; uint32_t src = (sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa; memcpy(static_cast(&dest), static_cast(&src), sizeof(dest)); return dest; } template void test_roundtrip() { // Representable T round trip via bfloat16 VERIFY_IS_EQUAL((internal::cast(internal::cast(-std::numeric_limits::infinity()))), -std::numeric_limits::infinity()); VERIFY_IS_EQUAL((internal::cast(internal::cast(std::numeric_limits::infinity()))), std::numeric_limits::infinity()); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-1.0)))), T(-1.0)); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-0.5)))), T(-0.5)); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-0.0)))), T(-0.0)); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(1.0)))), T(1.0)); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(0.5)))), T(0.5)); VERIFY_IS_EQUAL((internal::cast(internal::cast(T(0.0)))), T(0.0)); } void test_conversion() { using Eigen::bfloat16_impl::__bfloat16_raw; // Round-trip casts VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(bfloat16(1.0f))), bfloat16(1.0f)); VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(bfloat16(0.5f))), bfloat16(0.5f)); VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(bfloat16(-0.33333f))), bfloat16(-0.33333f)); VERIFY_IS_EQUAL( 
numext::bit_cast(numext::bit_cast(bfloat16(0.0f))), bfloat16(0.0f)); // Conversion from float. VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1.0f), 0x3f80); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f), 0x3f00); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.33333f), 0x3eab); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.38e38f), 0x7f7e); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.40e38f), 0x7f80); // Becomes infinity. // Verify round-to-nearest-even behavior. float val1 = static_cast(bfloat16(__bfloat16_raw(0x3c00))); float val2 = static_cast(bfloat16(__bfloat16_raw(0x3c01))); float val3 = static_cast(bfloat16(__bfloat16_raw(0x3c02))); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val1 + val2)), 0x3c00); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val2 + val3)), 0x3c02); // Conversion from int. VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-1), 0xbf80); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0), 0x0000); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1), 0x3f80); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(2), 0x4000); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3), 0x4040); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(12), 0x4140); // Conversion from bool. VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(false), 0x0000); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(true), 0x3f80); // Conversion to bool VERIFY_IS_EQUAL(static_cast(bfloat16(3)), true); VERIFY_IS_EQUAL(static_cast(bfloat16(0.33333f)), true); VERIFY_IS_EQUAL(bfloat16(-0.0), false); VERIFY_IS_EQUAL(static_cast(bfloat16(0.0)), false); // Explicit conversion to float. 
VERIFY_IS_EQUAL(static_cast(bfloat16(__bfloat16_raw(0x0000))), 0.0f); VERIFY_IS_EQUAL(static_cast(bfloat16(__bfloat16_raw(0x3f80))), 1.0f); // Implicit conversion to float VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x0000)), 0.0f); VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x3f80)), 1.0f); // Zero representations VERIFY_IS_EQUAL(bfloat16(0.0f), bfloat16(0.0f)); VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(0.0f)); VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(-0.0f)); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.0f), 0x0000); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-0.0f), 0x8000); // Default is zero VERIFY_IS_EQUAL(static_cast(bfloat16()), 0.0f); // Representable floats round trip via bfloat16 test_roundtrip(); test_roundtrip(); test_roundtrip >(); test_roundtrip >(); // Conversion Array a; for (int i = 0; i < 100; i++) a(i) = i + 1.25; Array b = a.cast(); Array c = b.cast(); for (int i = 0; i < 100; ++i) { VERIFY_LE(numext::abs(c(i) - a(i)), a(i) / 128); } // Epsilon VERIFY_LE(1.0f, static_cast((std::numeric_limits::epsilon)() + bfloat16(1.0f))); VERIFY_IS_EQUAL(1.0f, static_cast((std::numeric_limits::epsilon)() / bfloat16(2.0f) + bfloat16(1.0f))); // Negate VERIFY_IS_EQUAL(static_cast(-bfloat16(3.0f)), -3.0f); VERIFY_IS_EQUAL(static_cast(-bfloat16(-4.5f)), 4.5f); #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 VERIFY((numext::isnan)(static_cast(bfloat16(0.0 / 0.0)))); VERIFY((numext::isinf)(static_cast(bfloat16(1.0 / 0.0)))); VERIFY((numext::isinf)(static_cast(bfloat16(-1.0 / 0.0)))); // Visual Studio errors out on divisions by 0 VERIFY((numext::isnan)(bfloat16(0.0 / 0.0))); VERIFY((numext::isinf)(bfloat16(1.0 / 0.0))); VERIFY((numext::isinf)(bfloat16(-1.0 / 0.0))); #endif // NaNs and infinities. VERIFY(!(numext::isinf)(static_cast(bfloat16(3.38e38f)))); // Largest finite number. 
VERIFY(!(numext::isnan)(static_cast(bfloat16(0.0f)))); VERIFY((numext::isinf)(static_cast(bfloat16(__bfloat16_raw(0xff80))))); VERIFY((numext::isnan)(static_cast(bfloat16(__bfloat16_raw(0xffc0))))); VERIFY((numext::isinf)(static_cast(bfloat16(__bfloat16_raw(0x7f80))))); VERIFY((numext::isnan)(static_cast(bfloat16(__bfloat16_raw(0x7fc0))))); // Exactly same checks as above, just directly on the bfloat16 representation. VERIFY(!(numext::isinf)(bfloat16(__bfloat16_raw(0x7bff)))); VERIFY(!(numext::isnan)(bfloat16(__bfloat16_raw(0x0000)))); VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0xff80)))); VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0xffc0)))); VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0x7f80)))); VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0x7fc0)))); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x0, 0xff, 0x40, 0x0)), 0x7fc0); VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x1, 0xff, 0x40, 0x0)), 0xffc0); } void test_numtraits() { std::cout << "epsilon = " << NumTraits::epsilon() << " (0x" << std::hex << numext::bit_cast(NumTraits::epsilon()) << ")" << std::endl; std::cout << "highest = " << NumTraits::highest() << " (0x" << std::hex << numext::bit_cast(NumTraits::highest()) << ")" << std::endl; std::cout << "lowest = " << NumTraits::lowest() << " (0x" << std::hex << numext::bit_cast(NumTraits::lowest()) << ")" << std::endl; std::cout << "min = " << (std::numeric_limits::min)() << " (0x" << std::hex << numext::bit_cast((std::numeric_limits::min)()) << ")" << std::endl; std::cout << "denorm min = " << (std::numeric_limits::denorm_min)() << " (0x" << std::hex << numext::bit_cast((std::numeric_limits::denorm_min)()) << ")" << std::endl; std::cout << "infinity = " << NumTraits::infinity() << " (0x" << std::hex << numext::bit_cast(NumTraits::infinity()) << ")" << std::endl; std::cout << "quiet nan = " << NumTraits::quiet_NaN() << " (0x" << std::hex << numext::bit_cast(NumTraits::quiet_NaN()) << ")" << std::endl; std::cout << "signaling nan = " 
<< std::numeric_limits::signaling_NaN() << " (0x" << std::hex << numext::bit_cast(std::numeric_limits::signaling_NaN()) << ")" << std::endl; VERIFY(NumTraits::IsSigned); VERIFY_IS_EQUAL( numext::bit_cast(std::numeric_limits::infinity()), numext::bit_cast(bfloat16(std::numeric_limits::infinity())) ); // There is no guarantee that casting a 32-bit NaN to bfloat16 has a precise // bit pattern. We test that it is in fact a NaN, then test the signaling // bit (msb of significand is 1 for quiet, 0 for signaling). const numext::uint16_t BFLOAT16_QUIET_BIT = 0x0040; VERIFY( (numext::isnan)(std::numeric_limits::quiet_NaN()) && (numext::isnan)(bfloat16(std::numeric_limits::quiet_NaN())) && ((numext::bit_cast(std::numeric_limits::quiet_NaN()) & BFLOAT16_QUIET_BIT) > 0) && ((numext::bit_cast(bfloat16(std::numeric_limits::quiet_NaN())) & BFLOAT16_QUIET_BIT) > 0) ); // After a cast to bfloat16, a signaling NaN may become non-signaling. Thus, // we check that both are NaN, and that only the `numeric_limits` version is // signaling. 
VERIFY( (numext::isnan)(std::numeric_limits::signaling_NaN()) && (numext::isnan)(bfloat16(std::numeric_limits::signaling_NaN())) && ((numext::bit_cast(std::numeric_limits::signaling_NaN()) & BFLOAT16_QUIET_BIT) == 0) ); VERIFY( (std::numeric_limits::min)() > bfloat16(0.f) ); VERIFY( (std::numeric_limits::denorm_min)() > bfloat16(0.f) ); VERIFY_IS_EQUAL( (std::numeric_limits::denorm_min)()/bfloat16(2), bfloat16(0.f) ); } void test_arithmetic() { VERIFY_IS_EQUAL(static_cast(bfloat16(2) + bfloat16(2)), 4); VERIFY_IS_EQUAL(static_cast(bfloat16(2) + bfloat16(-2)), 0); VERIFY_IS_APPROX(static_cast(bfloat16(0.33333f) + bfloat16(0.66667f)), 1.0f); VERIFY_IS_EQUAL(static_cast(bfloat16(2.0f) * bfloat16(-5.5f)), -11.0f); VERIFY_IS_APPROX(static_cast(bfloat16(1.0f) / bfloat16(3.0f)), 0.3339f); VERIFY_IS_EQUAL(static_cast(-bfloat16(4096.0f)), -4096.0f); VERIFY_IS_EQUAL(static_cast(-bfloat16(-4096.0f)), 4096.0f); } void test_comparison() { VERIFY(bfloat16(1.0f) > bfloat16(0.5f)); VERIFY(bfloat16(0.5f) < bfloat16(1.0f)); VERIFY(!(bfloat16(1.0f) < bfloat16(0.5f))); VERIFY(!(bfloat16(0.5f) > bfloat16(1.0f))); VERIFY(!(bfloat16(4.0f) > bfloat16(4.0f))); VERIFY(!(bfloat16(4.0f) < bfloat16(4.0f))); VERIFY(!(bfloat16(0.0f) < bfloat16(-0.0f))); VERIFY(!(bfloat16(-0.0f) < bfloat16(0.0f))); VERIFY(!(bfloat16(0.0f) > bfloat16(-0.0f))); VERIFY(!(bfloat16(-0.0f) > bfloat16(0.0f))); VERIFY(bfloat16(0.2f) > bfloat16(-1.0f)); VERIFY(bfloat16(-1.0f) < bfloat16(0.2f)); VERIFY(bfloat16(-16.0f) < bfloat16(-15.0f)); VERIFY(bfloat16(1.0f) == bfloat16(1.0f)); VERIFY(bfloat16(1.0f) != bfloat16(2.0f)); // Comparisons with NaNs and infinities. 
#if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 VERIFY(!(bfloat16(0.0 / 0.0) == bfloat16(0.0 / 0.0))); VERIFY(bfloat16(0.0 / 0.0) != bfloat16(0.0 / 0.0)); VERIFY(!(bfloat16(1.0) == bfloat16(0.0 / 0.0))); VERIFY(!(bfloat16(1.0) < bfloat16(0.0 / 0.0))); VERIFY(!(bfloat16(1.0) > bfloat16(0.0 / 0.0))); VERIFY(bfloat16(1.0) != bfloat16(0.0 / 0.0)); VERIFY(bfloat16(1.0) < bfloat16(1.0 / 0.0)); VERIFY(bfloat16(1.0) > bfloat16(-1.0 / 0.0)); #endif } void test_basic_functions() { VERIFY_IS_EQUAL(static_cast(numext::abs(bfloat16(3.5f))), 3.5f); VERIFY_IS_EQUAL(static_cast(abs(bfloat16(3.5f))), 3.5f); VERIFY_IS_EQUAL(static_cast(numext::abs(bfloat16(-3.5f))), 3.5f); VERIFY_IS_EQUAL(static_cast(abs(bfloat16(-3.5f))), 3.5f); VERIFY_IS_EQUAL(static_cast(numext::floor(bfloat16(3.5f))), 3.0f); VERIFY_IS_EQUAL(static_cast(floor(bfloat16(3.5f))), 3.0f); VERIFY_IS_EQUAL(static_cast(numext::floor(bfloat16(-3.5f))), -4.0f); VERIFY_IS_EQUAL(static_cast(floor(bfloat16(-3.5f))), -4.0f); VERIFY_IS_EQUAL(static_cast(numext::ceil(bfloat16(3.5f))), 4.0f); VERIFY_IS_EQUAL(static_cast(ceil(bfloat16(3.5f))), 4.0f); VERIFY_IS_EQUAL(static_cast(numext::ceil(bfloat16(-3.5f))), -3.0f); VERIFY_IS_EQUAL(static_cast(ceil(bfloat16(-3.5f))), -3.0f); VERIFY_IS_APPROX(static_cast(numext::sqrt(bfloat16(0.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(sqrt(bfloat16(0.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(numext::sqrt(bfloat16(4.0f))), 2.0f); VERIFY_IS_APPROX(static_cast(sqrt(bfloat16(4.0f))), 2.0f); VERIFY_IS_APPROX(static_cast(numext::pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(numext::pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); VERIFY_IS_APPROX(static_cast(pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); VERIFY_IS_EQUAL(static_cast(numext::exp(bfloat16(0.0f))), 1.0f); VERIFY_IS_EQUAL(static_cast(exp(bfloat16(0.0f))), 1.0f); VERIFY_IS_APPROX(static_cast(numext::exp(bfloat16(EIGEN_PI))), 
20.f + static_cast(EIGEN_PI)); VERIFY_IS_APPROX(static_cast(exp(bfloat16(EIGEN_PI))), 20.f + static_cast(EIGEN_PI)); VERIFY_IS_EQUAL(static_cast(numext::expm1(bfloat16(0.0f))), 0.0f); VERIFY_IS_EQUAL(static_cast(expm1(bfloat16(0.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(numext::expm1(bfloat16(2.0f))), 6.375f); VERIFY_IS_APPROX(static_cast(expm1(bfloat16(2.0f))), 6.375f); VERIFY_IS_EQUAL(static_cast(numext::log(bfloat16(1.0f))), 0.0f); VERIFY_IS_EQUAL(static_cast(log(bfloat16(1.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(numext::log(bfloat16(10.0f))), 2.296875f); VERIFY_IS_APPROX(static_cast(log(bfloat16(10.0f))), 2.296875f); VERIFY_IS_EQUAL(static_cast(numext::log1p(bfloat16(0.0f))), 0.0f); VERIFY_IS_EQUAL(static_cast(log1p(bfloat16(0.0f))), 0.0f); VERIFY_IS_APPROX(static_cast(numext::log1p(bfloat16(10.0f))), 2.390625f); VERIFY_IS_APPROX(static_cast(log1p(bfloat16(10.0f))), 2.390625f); } void test_trigonometric_functions() { VERIFY_IS_APPROX(numext::cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); VERIFY_IS_APPROX(cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI)), bfloat16(cosf(EIGEN_PI))); // VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI/2)), bfloat16(cosf(EIGEN_PI/2))); // VERIFY_IS_APPROX(numext::cos(bfloat16(3*EIGEN_PI/2)), bfloat16(cosf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::cos(bfloat16(3.5f)), bfloat16(cosf(3.5f))); VERIFY_IS_APPROX(numext::sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); VERIFY_IS_APPROX(sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); // VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI)), bfloat16(sinf(EIGEN_PI))); VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI/2)), bfloat16(sinf(EIGEN_PI/2))); VERIFY_IS_APPROX(numext::sin(bfloat16(3*EIGEN_PI/2)), bfloat16(sinf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::sin(bfloat16(3.5f)), bfloat16(sinf(3.5f))); VERIFY_IS_APPROX(numext::tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); VERIFY_IS_APPROX(tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); // 
VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI)), bfloat16(tanf(EIGEN_PI))); // VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI/2)), bfloat16(tanf(EIGEN_PI/2))); // VERIFY_IS_APPROX(numext::tan(bfloat16(3*EIGEN_PI/2)), bfloat16(tanf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::tan(bfloat16(3.5f)), bfloat16(tanf(3.5f))); } void test_array() { typedef Array ArrayXh; Index size = internal::random(1,10); Index i = internal::random(0,size-1); ArrayXh a1 = ArrayXh::Random(size), a2 = ArrayXh::Random(size); VERIFY_IS_APPROX( a1+a1, bfloat16(2)*a1 ); VERIFY( (a1.abs() >= bfloat16(0)).all() ); VERIFY_IS_APPROX( (a1*a1).sqrt(), a1.abs() ); VERIFY( ((a1.min)(a2) <= (a1.max)(a2)).all() ); a1(i) = bfloat16(-10.); VERIFY_IS_EQUAL( a1.minCoeff(), bfloat16(-10.) ); a1(i) = bfloat16(10.); VERIFY_IS_EQUAL( a1.maxCoeff(), bfloat16(10.) ); std::stringstream ss; ss << a1; } void test_product() { typedef Matrix MatrixXh; Index rows = internal::random(1,EIGEN_TEST_MAX_SIZE); Index cols = internal::random(1,EIGEN_TEST_MAX_SIZE); Index depth = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixXh Ah = MatrixXh::Random(rows,depth); MatrixXh Bh = MatrixXh::Random(depth,cols); MatrixXh Ch = MatrixXh::Random(rows,cols); MatrixXf Af = Ah.cast(); MatrixXf Bf = Bh.cast(); MatrixXf Cf = Ch.cast(); VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast()); } EIGEN_DECLARE_TEST(bfloat16_float) { CALL_SUBTEST(test_numtraits()); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST(test_conversion()); CALL_SUBTEST(test_arithmetic()); CALL_SUBTEST(test_comparison()); CALL_SUBTEST(test_basic_functions()); CALL_SUBTEST(test_trigonometric_functions()); CALL_SUBTEST(test_array()); CALL_SUBTEST(test_product()); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bicgstab.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "sparse_solver.h" #include template void test_bicgstab_T() { BiCGSTAB, DiagonalPreconditioner > bicgstab_colmajor_diag; BiCGSTAB, IdentityPreconditioner > bicgstab_colmajor_I; BiCGSTAB, IncompleteLUT > bicgstab_colmajor_ilut; //BiCGSTAB, SSORPreconditioner > bicgstab_colmajor_ssor; bicgstab_colmajor_diag.setTolerance(NumTraits::epsilon()*4); bicgstab_colmajor_ilut.setTolerance(NumTraits::epsilon()*4); CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_diag) ); // CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_I) ); CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ilut) ); //CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ssor) ); } EIGEN_DECLARE_TEST(bicgstab) { CALL_SUBTEST_1((test_bicgstab_T()) ); CALL_SUBTEST_2((test_bicgstab_T, int>())); CALL_SUBTEST_3((test_bicgstab_T())); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/blasutil.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2020 Everton Constantino // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/ #include "main.h" // Disable "ignoring attributes on template argument" // for packet_traits // => The only workaround would be to wrap _m128 and the likes // within wrappers. #if EIGEN_GNUC_AT_LEAST(6,0) #pragma GCC diagnostic ignored "-Wignored-attributes" #endif #define GET(i,j) (StorageOrder == RowMajor ? (i)*stride + (j) : (i) + (j)*stride) #define SCATTER(i,j,k) (StorageOrder == RowMajor ? 
((i)+(k))*stride + (j) : (i) + ((j)+(k))*stride) template void compare(const Packet& a, const Packet& b) { int pktsz = internal::packet_traits::size; Scalar *buffA = new Scalar[pktsz]; Scalar *buffB = new Scalar[pktsz]; internal::pstoreu(buffA, a); internal::pstoreu(buffB, b); for(int i = 0; i < pktsz; i++) { VERIFY_IS_EQUAL(buffA[i], buffB[i]); } delete[] buffA; delete[] buffB; } template struct PacketBlockSet { typedef typename internal::packet_traits::type Packet; void setPacketBlock(internal::PacketBlock& block, Scalar value) { for(int idx = 0; idx < n; idx++) { block.packet[idx] = internal::pset1(value); } } void comparePacketBlock(Scalar *data, int i, int j, int stride, internal::PacketBlock& block) { for(int idx = 0; idx < n; idx++) { Packet line = internal::ploadu(data + SCATTER(i,j,idx)); compare(block.packet[idx], line); } } }; template void run_bdmp_spec_1() { typedef internal::blas_data_mapper BlasDataMapper; int packetSize = internal::packet_traits::size; int minSize = std::max(packetSize, BlockSize); typedef typename internal::packet_traits::type Packet; int szm = internal::random(minSize,500), szn = internal::random(minSize,500); int stride = StorageOrder == RowMajor ? 
szn : szm; Scalar *d = new Scalar[szn*szm]; // Initializing with random entries for(int i = 0; i < szm*szn; i++) { d[i] = internal::random(static_cast(3), static_cast(10)); } BlasDataMapper bdm(d, stride); // Testing operator() for(int i = 0; i < szm; i++) { for(int j = 0; j < szn; j++) { VERIFY_IS_EQUAL(d[GET(i,j)], bdm(i,j)); } } // Testing getSubMapper and getLinearMapper int i0 = internal::random(0,szm-2); int j0 = internal::random(0,szn-2); for(int i = i0; i < szm; i++) { for(int j = j0; j < szn; j++) { const BlasDataMapper& bdmSM = bdm.getSubMapper(i0,j0); const internal::BlasLinearMapper& bdmLM = bdm.getLinearMapper(i0,j0); Scalar v = bdmSM(i - i0, j - j0); Scalar vd = d[GET(i,j)]; VERIFY_IS_EQUAL(vd, v); VERIFY_IS_EQUAL(vd, bdmLM(GET(i-i0, j-j0))); } } // Testing loadPacket for(int i = 0; i < szm - minSize; i++) { for(int j = 0; j < szn - minSize; j++) { Packet pktBDM = bdm.template loadPacket(i,j); Packet pktD = internal::ploadu(d + GET(i,j)); compare(pktBDM, pktD); } } // Testing gatherPacket Scalar *buff = new Scalar[packetSize]; for(int i = 0; i < szm - minSize; i++) { for(int j = 0; j < szn - minSize; j++) { Packet p = bdm.template gatherPacket(i,j); internal::pstoreu(buff, p); for(int k = 0; k < packetSize; k++) { VERIFY_IS_EQUAL(d[SCATTER(i,j,k)], buff[k]); } } } delete[] buff; // Testing scatterPacket for(int i = 0; i < szm - minSize; i++) { for(int j = 0; j < szn - minSize; j++) { Packet p = internal::pset1(static_cast(1)); bdm.template scatterPacket(i,j,p); for(int k = 0; k < packetSize; k++) { VERIFY_IS_EQUAL(d[SCATTER(i,j,k)], static_cast(1)); } } } //Testing storePacketBlock internal::PacketBlock block; PacketBlockSet pbs; pbs.setPacketBlock(block, static_cast(2)); for(int i = 0; i < szm - minSize; i++) { for(int j = 0; j < szn - minSize; j++) { bdm.template storePacketBlock(i, j, block); pbs.comparePacketBlock(d, i, j, stride, block); } } delete[] d; } template void run_test() { run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); 
run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); run_bdmp_spec_1(); } EIGEN_DECLARE_TEST(blasutil) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(run_test()); CALL_SUBTEST_2(run_test()); CALL_SUBTEST_3(run_test()); // TODO: Replace this by a call to numext::int64_t as soon as we have a way to // detect the typedef for int64_t on all platforms #if EIGEN_HAS_CXX11 CALL_SUBTEST_4(run_test()); #else CALL_SUBTEST_4(run_test()); #endif CALL_SUBTEST_5(run_test()); CALL_SUBTEST_6(run_test()); CALL_SUBTEST_7(run_test >()); CALL_SUBTEST_8(run_test >()); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/block.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" template typename Eigen::internal::enable_if::IsComplex,typename MatrixType::Scalar>::type block_real_only(const MatrixType &m1, Index r1, Index r2, Index c1, Index c2, const Scalar& s1) { // check cwise-Functions: VERIFY_IS_APPROX(m1.row(r1).cwiseMax(s1), m1.cwiseMax(s1).row(r1)); VERIFY_IS_APPROX(m1.col(c1).cwiseMin(s1), m1.cwiseMin(s1).col(c1)); VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMin(s1), m1.cwiseMin(s1).block(r1,c1,r2-r1+1,c2-c1+1)); VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMax(s1), m1.cwiseMax(s1).block(r1,c1,r2-r1+1,c2-c1+1)); return Scalar(0); } template typename Eigen::internal::enable_if::IsComplex,typename MatrixType::Scalar>::type block_real_only(const MatrixType &, Index, Index, Index, Index, const Scalar&) { return Scalar(0); } // Check at compile-time that T1==T2, and at runtime-time that a==b template typename internal::enable_if::value,bool>::type is_same_block(const T1& a, const T2& b) { return a.isApprox(b); } template typename internal::enable_if<((MatrixType::Flags&RowMajorBit)==0),void>::type check_left_top(const MatrixType& m, Index r, Index c, Index rows, Index /*unused*/) { VERIFY_IS_EQUAL(m.leftCols(c).coeff(r+c*rows), m(r,c)); } template typename internal::enable_if<((MatrixType::Flags&RowMajorBit)!=0),void>::type check_left_top(const MatrixType& m, Index r, Index c, Index /*unused*/, Index cols) { VERIFY_IS_EQUAL(m.topRows(r).coeff(c+r*cols), m(r,c)); } template void block(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix VectorType; typedef Matrix RowVectorType; typedef Matrix DynamicMatrixType; typedef Matrix DynamicVectorType; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m1_copy = m1, m2 = MatrixType::Random(rows, cols), m3(rows, cols), ones = MatrixType::Ones(rows, cols); VectorType v1 = VectorType::Random(rows); Scalar s1 = internal::random(); Index r1 = 
internal::random(0,rows-1); Index r2 = internal::random(r1,rows-1); Index c1 = internal::random(0,cols-1); Index c2 = internal::random(c1,cols-1); block_real_only(m1, r1, r2, c1, c1, s1); //check row() and col() VERIFY_IS_EQUAL(m1.col(c1).transpose(), m1.transpose().row(c1)); //check operator(), both constant and non-constant, on row() and col() m1 = m1_copy; m1.row(r1) += s1 * m1_copy.row(r2); VERIFY_IS_APPROX(m1.row(r1), m1_copy.row(r1) + s1 * m1_copy.row(r2)); // check nested block xpr on lhs m1.row(r1).row(0) += s1 * m1_copy.row(r2); VERIFY_IS_APPROX(m1.row(r1), m1_copy.row(r1) + Scalar(2) * s1 * m1_copy.row(r2)); m1 = m1_copy; m1.col(c1) += s1 * m1_copy.col(c2); VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + s1 * m1_copy.col(c2)); m1.col(c1).col(0) += s1 * m1_copy.col(c2); VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + Scalar(2) * s1 * m1_copy.col(c2)); check_left_top(m1,r1,c1,rows,cols); //check block() Matrix b1(1,1); b1(0,0) = m1(r1,c1); RowVectorType br1(m1.block(r1,0,1,cols)); VectorType bc1(m1.block(0,c1,rows,1)); VERIFY_IS_EQUAL(b1, m1.block(r1,c1,1,1)); VERIFY_IS_EQUAL(m1.row(r1), br1); VERIFY_IS_EQUAL(m1.col(c1), bc1); //check operator(), both constant and non-constant, on block() m1.block(r1,c1,r2-r1+1,c2-c1+1) = s1 * m2.block(0, 0, r2-r1+1,c2-c1+1); m1.block(r1,c1,r2-r1+1,c2-c1+1)(r2-r1,c2-c1) = m2.block(0, 0, r2-r1+1,c2-c1+1)(0,0); const Index BlockRows = 2; const Index BlockCols = 5; if (rows>=5 && cols>=8) { // test fixed block() as lvalue m1.template block(1,1) *= s1; // test operator() on fixed block() both as constant and non-constant m1.template block(1,1)(0, 3) = m1.template block<2,5>(1,1)(1,2); // check that fixed block() and block() agree Matrix b = m1.template block(3,3); VERIFY_IS_EQUAL(b, m1.block(3,3,BlockRows,BlockCols)); // same tests with mixed fixed/dynamic size m1.template block(1,1,BlockRows,BlockCols) *= s1; m1.template block(1,1,BlockRows,BlockCols)(0,3) = m1.template block<2,5>(1,1)(1,2); Matrix b2 = m1.template 
block(3,3,2,5); VERIFY_IS_EQUAL(b2, m1.block(3,3,BlockRows,BlockCols)); VERIFY(is_same_block(m1.block(3,3,BlockRows,BlockCols), m1.block(3,3,fix(BlockRows),fix(BlockCols)))); VERIFY(is_same_block(m1.template block(1,1,BlockRows,BlockCols), m1.block(1,1,fix,BlockCols))); VERIFY(is_same_block(m1.template block(1,1,BlockRows,BlockCols), m1.block(1,1,fix(),fix))); VERIFY(is_same_block(m1.template block(1,1,BlockRows,BlockCols), m1.block(1,1,fix,fix(BlockCols)))); } if (rows>2) { // test sub vectors VERIFY_IS_EQUAL(v1.template head<2>(), v1.block(0,0,2,1)); VERIFY_IS_EQUAL(v1.template head<2>(), v1.head(2)); VERIFY_IS_EQUAL(v1.template head<2>(), v1.segment(0,2)); VERIFY_IS_EQUAL(v1.template head<2>(), v1.template segment<2>(0)); Index i = rows-2; VERIFY_IS_EQUAL(v1.template tail<2>(), v1.block(i,0,2,1)); VERIFY_IS_EQUAL(v1.template tail<2>(), v1.tail(2)); VERIFY_IS_EQUAL(v1.template tail<2>(), v1.segment(i,2)); VERIFY_IS_EQUAL(v1.template tail<2>(), v1.template segment<2>(i)); i = internal::random(0,rows-2); VERIFY_IS_EQUAL(v1.segment(i,2), v1.template segment<2>(i)); } // stress some basic stuffs with block matrices VERIFY(numext::real(ones.col(c1).sum()) == RealScalar(rows)); VERIFY(numext::real(ones.row(r1).sum()) == RealScalar(cols)); VERIFY(numext::real(ones.col(c1).dot(ones.col(c2))) == RealScalar(rows)); VERIFY(numext::real(ones.row(r1).dot(ones.row(r2))) == RealScalar(cols)); // check that linear acccessors works on blocks m1 = m1_copy; // now test some block-inside-of-block. 
// expressions with direct access VERIFY_IS_EQUAL( (m1.block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2)) , (m1.block(r2,c2,rows-r2,cols-c2)) ); VERIFY_IS_EQUAL( (m1.block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , (m1.row(r1).segment(c1,c2-c1+1)) ); VERIFY_IS_EQUAL( (m1.block(r1,c1,r2-r1+1,c2-c1+1).col(0)) , (m1.col(c1).segment(r1,r2-r1+1)) ); VERIFY_IS_EQUAL( (m1.block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0)) , (m1.row(r1).segment(c1,c2-c1+1)).transpose() ); VERIFY_IS_EQUAL( (m1.transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0)) , (m1.row(r1).segment(c1,c2-c1+1)).transpose() ); // expressions without direct access VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2)) , ((m1+m2).block(r2,c2,rows-r2,cols-c2)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).row(0)) , ((m1+m2).eval().row(r1).segment(c1,c2-c1+1)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).col(0)) , ((m1+m2).col(c1).segment(r1,r2-r1+1)) ); VERIFY_IS_APPROX( ((m1+m2).block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() ); VERIFY_IS_APPROX( ((m1+m2).transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0)) , ((m1+m2).row(r1).segment(c1,c2-c1+1)).transpose() ); VERIFY_IS_APPROX( ((m1+m2).template block(r1,c1,r2-r1+1,1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)) ); VERIFY_IS_APPROX( ((m1+m2).template block<1,Dynamic>(r1,c1,1,c2-c1+1)) , ((m1+m2).eval().row(r1).eval().segment(c1,c2-c1+1)) ); VERIFY_IS_APPROX( ((m1+m2).transpose().template block<1,Dynamic>(c1,r1,1,r2-r1+1)) , ((m1+m2).eval().col(c1).eval().segment(r1,r2-r1+1)).transpose() ); VERIFY_IS_APPROX( (m1+m2).row(r1).eval(), (m1+m2).eval().row(r1) ); VERIFY_IS_APPROX( (m1+m2).adjoint().col(r1).eval(), (m1+m2).adjoint().eval().col(r1) ); VERIFY_IS_APPROX( (m1+m2).adjoint().row(c1).eval(), (m1+m2).adjoint().eval().row(c1) ); 
VERIFY_IS_APPROX( (m1*1).row(r1).segment(c1,c2-c1+1).eval(), m1.row(r1).eval().segment(c1,c2-c1+1).eval() ); VERIFY_IS_APPROX( m1.col(c1).reverse().segment(r1,r2-r1+1).eval(),m1.col(c1).reverse().eval().segment(r1,r2-r1+1).eval() ); VERIFY_IS_APPROX( (m1*1).topRows(r1), m1.topRows(r1) ); VERIFY_IS_APPROX( (m1*1).leftCols(c1), m1.leftCols(c1) ); VERIFY_IS_APPROX( (m1*1).transpose().topRows(c1), m1.transpose().topRows(c1) ); VERIFY_IS_APPROX( (m1*1).transpose().leftCols(r1), m1.transpose().leftCols(r1) ); VERIFY_IS_APPROX( (m1*1).transpose().middleRows(c1,c2-c1+1), m1.transpose().middleRows(c1,c2-c1+1) ); VERIFY_IS_APPROX( (m1*1).transpose().middleCols(r1,r2-r1+1), m1.transpose().middleCols(r1,r2-r1+1) ); // evaluation into plain matrices from expressions with direct access (stress MapBase) DynamicMatrixType dm; DynamicVectorType dv; dm.setZero(); dm = m1.block(r1,c1,rows-r1,cols-c1).block(r2-r1,c2-c1,rows-r2,cols-c2); VERIFY_IS_EQUAL(dm, (m1.block(r2,c2,rows-r2,cols-c2))); dm.setZero(); dv.setZero(); dm = m1.block(r1,c1,r2-r1+1,c2-c1+1).row(0).transpose(); dv = m1.row(r1).segment(c1,c2-c1+1); VERIFY_IS_EQUAL(dv, dm); dm.setZero(); dv.setZero(); dm = m1.col(c1).segment(r1,r2-r1+1); dv = m1.block(r1,c1,r2-r1+1,c2-c1+1).col(0); VERIFY_IS_EQUAL(dv, dm); dm.setZero(); dv.setZero(); dm = m1.block(r1,c1,r2-r1+1,c2-c1+1).transpose().col(0); dv = m1.row(r1).segment(c1,c2-c1+1); VERIFY_IS_EQUAL(dv, dm); dm.setZero(); dv.setZero(); dm = m1.row(r1).segment(c1,c2-c1+1).transpose(); dv = m1.transpose().block(c1,r1,c2-c1+1,r2-r1+1).col(0); VERIFY_IS_EQUAL(dv, dm); VERIFY_IS_EQUAL( (m1.template block(1,0,0,1)), m1.block(1,0,0,1)); VERIFY_IS_EQUAL( (m1.template block<1,Dynamic>(0,1,1,0)), m1.block(0,1,1,0)); VERIFY_IS_EQUAL( ((m1*1).template block(1,0,0,1)), m1.block(1,0,0,1)); VERIFY_IS_EQUAL( ((m1*1).template block<1,Dynamic>(0,1,1,0)), m1.block(0,1,1,0)); VERIFY_IS_EQUAL( m1.template subVector(r1), m1.row(r1) ); VERIFY_IS_APPROX( (m1+m1).template subVector(r1), (m1+m1).row(r1) ); 
VERIFY_IS_EQUAL( m1.template subVector(c1), m1.col(c1) ); VERIFY_IS_APPROX( (m1+m1).template subVector(c1), (m1+m1).col(c1) ); VERIFY_IS_EQUAL( m1.template subVectors(), m1.rows() ); VERIFY_IS_EQUAL( m1.template subVectors(), m1.cols() ); if (rows>=2 || cols>=2) { VERIFY_IS_EQUAL( int(m1.middleCols(0,0).IsRowMajor), int(m1.IsRowMajor) ); VERIFY_IS_EQUAL( m1.middleCols(0,0).outerSize(), m1.IsRowMajor ? rows : 0); VERIFY_IS_EQUAL( m1.middleCols(0,0).innerSize(), m1.IsRowMajor ? 0 : rows); VERIFY_IS_EQUAL( int(m1.middleRows(0,0).IsRowMajor), int(m1.IsRowMajor) ); VERIFY_IS_EQUAL( m1.middleRows(0,0).outerSize(), m1.IsRowMajor ? 0 : cols); VERIFY_IS_EQUAL( m1.middleRows(0,0).innerSize(), m1.IsRowMajor ? cols : 0); } } template typename internal::enable_if::type compare_using_data_and_stride(const MatrixType& m) { Index rows = m.rows(); Index cols = m.cols(); Index size = m.size(); Index innerStride = m.innerStride(); Index rowStride = m.rowStride(); Index colStride = m.colStride(); const typename MatrixType::Scalar* data = m.data(); for(int j=0;j typename internal::enable_if::type compare_using_data_and_stride(const MatrixType& m) { Index rows = m.rows(); Index cols = m.cols(); Index innerStride = m.innerStride(); Index outerStride = m.outerStride(); Index rowStride = m.rowStride(); Index colStride = m.colStride(); const typename MatrixType::Scalar* data = m.data(); for(int j=0;j void data_and_stride(const MatrixType& m) { Index rows = m.rows(); Index cols = m.cols(); Index r1 = internal::random(0,rows-1); Index r2 = internal::random(r1,rows-1); Index c1 = internal::random(0,cols-1); Index c2 = internal::random(c1,cols-1); MatrixType m1 = MatrixType::Random(rows, cols); compare_using_data_and_stride(m1.block(r1, c1, r2-r1+1, c2-c1+1)); compare_using_data_and_stride(m1.transpose().block(c1, r1, c2-c1+1, r2-r1+1)); compare_using_data_and_stride(m1.row(r1)); compare_using_data_and_stride(m1.col(c1)); compare_using_data_and_stride(m1.row(r1).transpose()); 
compare_using_data_and_stride(m1.col(c1).transpose()); } EIGEN_DECLARE_TEST(block) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( block(Matrix()) ); CALL_SUBTEST_1( block(Matrix(internal::random(2,50))) ); CALL_SUBTEST_1( block(Matrix(internal::random(2,50))) ); CALL_SUBTEST_2( block(Matrix4d()) ); CALL_SUBTEST_3( block(MatrixXcf(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_4( block(MatrixXi(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_5( block(MatrixXcd(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_6( block(MatrixXf(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_7( block(Matrix(internal::random(2,50), internal::random(2,50))) ); CALL_SUBTEST_8( block(Matrix(3, 4)) ); #ifndef EIGEN_DEFAULT_TO_ROW_MAJOR CALL_SUBTEST_6( data_and_stride(MatrixXf(internal::random(5,50), internal::random(5,50))) ); CALL_SUBTEST_7( data_and_stride(Matrix(internal::random(5,50), internal::random(5,50))) ); #endif } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/boostmultiprec.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include #ifdef EIGEN_TEST_MAX_SIZE #undef EIGEN_TEST_MAX_SIZE #endif #define EIGEN_TEST_MAX_SIZE 50 #ifdef EIGEN_TEST_PART_1 #include "cholesky.cpp" #endif #ifdef EIGEN_TEST_PART_2 #include "lu.cpp" #endif #ifdef EIGEN_TEST_PART_3 #include "qr.cpp" #endif #ifdef EIGEN_TEST_PART_4 #include "qr_colpivoting.cpp" #endif #ifdef EIGEN_TEST_PART_5 #include "qr_fullpivoting.cpp" #endif #ifdef EIGEN_TEST_PART_6 #include "eigensolver_selfadjoint.cpp" #endif #ifdef EIGEN_TEST_PART_7 #include "eigensolver_generic.cpp" #endif #ifdef EIGEN_TEST_PART_8 #include "eigensolver_generalized_real.cpp" #endif #ifdef EIGEN_TEST_PART_9 #include "jacobisvd.cpp" #endif #ifdef EIGEN_TEST_PART_10 #include "bdcsvd.cpp" #endif #ifdef EIGEN_TEST_PART_11 #include "simplicial_cholesky.cpp" #endif #include #undef min #undef max #undef isnan #undef isinf #undef isfinite #undef I #include #include #include #include #include typedef boost::multiprecision::number, boost::multiprecision::et_on> Real; namespace Eigen { template<> struct NumTraits : GenericNumTraits { static inline Real dummy_precision() { return 1e-50; } }; template struct NumTraits > : NumTraits {}; template<> Real test_precision() { return 1e-50; } // needed in C++93 mode where number does not support explicit cast. 
namespace internal { template struct cast_impl { static inline NewType run(const Real& x) { return x.template convert_to(); } }; template<> struct cast_impl > { static inline std::complex run(const Real& x) { return std::complex(x); } }; } } namespace boost { namespace multiprecision { // to make ADL works as expected: using boost::math::isfinite; using boost::math::isnan; using boost::math::isinf; using boost::math::copysign; using boost::math::hypot; // The following is needed for std::complex: Real fabs(const Real& a) { return abs EIGEN_NOT_A_MACRO (a); } Real fmax(const Real& a, const Real& b) { using std::max; return max(a,b); } // some specialization for the unit tests: inline bool test_isMuchSmallerThan(const Real& a, const Real& b) { return internal::isMuchSmallerThan(a, b, test_precision()); } inline bool test_isApprox(const Real& a, const Real& b) { return internal::isApprox(a, b, test_precision()); } inline bool test_isApproxOrLessThan(const Real& a, const Real& b) { return internal::isApproxOrLessThan(a, b, test_precision()); } Real get_test_precision(const Real&) { return test_precision(); } Real test_relative_error(const Real &a, const Real &b) { using Eigen::numext::abs2; return sqrt(abs2(a-b)/Eigen::numext::mini(abs2(a),abs2(b))); } } } namespace Eigen { } EIGEN_DECLARE_TEST(boostmultiprec) { typedef Matrix Mat; typedef Matrix,Dynamic,Dynamic> MatC; std::cout << "NumTraits::epsilon() = " << NumTraits::epsilon() << std::endl; std::cout << "NumTraits::dummy_precision() = " << NumTraits::dummy_precision() << std::endl; std::cout << "NumTraits::lowest() = " << NumTraits::lowest() << std::endl; std::cout << "NumTraits::highest() = " << NumTraits::highest() << std::endl; std::cout << "NumTraits::digits10() = " << NumTraits::digits10() << std::endl; // check stream output { Mat A(10,10); A.setRandom(); std::stringstream ss; ss << A; } { MatC A(10,10); A.setRandom(); std::stringstream ss; ss << A; } for(int i = 0; i < g_repeat; i++) { int s = 
internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_1( cholesky(Mat(s,s)) ); CALL_SUBTEST_2( lu_non_invertible() ); CALL_SUBTEST_2( lu_invertible() ); CALL_SUBTEST_2( lu_non_invertible() ); CALL_SUBTEST_2( lu_invertible() ); CALL_SUBTEST_3( qr(Mat(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_3( qr_invertible() ); CALL_SUBTEST_4( qr() ); CALL_SUBTEST_4( cod() ); CALL_SUBTEST_4( qr_invertible() ); CALL_SUBTEST_5( qr() ); CALL_SUBTEST_5( qr_invertible() ); CALL_SUBTEST_6( selfadjointeigensolver(Mat(s,s)) ); CALL_SUBTEST_7( eigensolver(Mat(s,s)) ); CALL_SUBTEST_8( generalized_eigensolver_real(Mat(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } CALL_SUBTEST_9(( jacobisvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_10(( bdcsvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_11(( test_simplicial_cholesky_T() )); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bug1213.cpp ================================================ // This anonymous enum is essential to trigger the linking issue enum { Foo }; #include "bug1213.h" bool bug1213_1(const Eigen::Vector3f& x) { return bug1213_2(x); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bug1213.h ================================================ #include template bool bug1213_2(const Eigen::Matrix& x); bool bug1213_1(const Eigen::Vector3f& x); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/bug1213_main.cpp ================================================ // This is a regression unit regarding a weird linking issue with gcc. 
#include "bug1213.h" int main() { return 0; } template bool bug1213_2(const Eigen::Matrix& ) { return true; } template bool bug1213_2(const Eigen::Vector3f&); ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/cholesky.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define TEST_ENABLE_TEMPORARY_TRACKING #include "main.h" #include #include #include "solverbase.h" template typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { if(m.cols()==0) return typename MatrixType::RealScalar(0); MatrixType symm = m.template selfadjointView(); return symm.cwiseAbs().colwise().sum().maxCoeff(); } template class CholType> void test_chol_update(const MatrixType& symm) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix VectorType; MatrixType symmLo = symm.template triangularView(); MatrixType symmUp = symm.template triangularView(); MatrixType symmCpy = symm; CholType chollo(symmLo); CholType cholup(symmUp); for (int k=0; k<10; ++k) { VectorType vec = VectorType::Random(symm.rows()); RealScalar sigma = internal::random(); symmCpy += sigma * vec * vec.adjoint(); // we are doing some downdates, so it might be the case that the matrix is not SPD anymore CholType chol(symmCpy); if(chol.info()!=Success) break; chollo.rankUpdate(vec, sigma); VERIFY_IS_APPROX(symmCpy, chollo.reconstructedMatrix()); cholup.rankUpdate(vec, sigma); VERIFY_IS_APPROX(symmCpy, cholup.reconstructedMatrix()); } } template void cholesky(const MatrixType& m) { /* this test covers the following files: LLT.h LDLT.h */ Index rows = m.rows(); Index cols 
= m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix SquareMatrixType; typedef Matrix VectorType; MatrixType a0 = MatrixType::Random(rows,cols); VectorType vecB = VectorType::Random(rows), vecX(rows); MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols); SquareMatrixType symm = a0 * a0.adjoint(); // let's make sure the matrix is not singular or near singular for (int k=0; k<3; ++k) { MatrixType a1 = MatrixType::Random(rows,cols); symm += a1 * a1.adjoint(); } { STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); SquareMatrixType symmUp = symm.template triangularView(); SquareMatrixType symmLo = symm.template triangularView(); LLT chollo(symmLo); VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); check_solverbase(symm, chollo, rows, rows, 1); check_solverbase(symm, chollo, rows, cols, rows); const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / matrix_l1_norm(symmLo_inverse); RealScalar rcond_est = chollo.rcond(); // Verify that the estimated condition number is within a factor of 10 of the // truth. VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); // test the upper mode LLT cholup(symmUp); VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix()); vecX = cholup.solve(vecB); VERIFY_IS_APPROX(symm * vecX, vecB); matX = cholup.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); // Verify that the estimated condition number is within a factor of 10 of the // truth. 
const MatrixType symmUp_inverse = cholup.solve(MatrixType::Identity(rows,cols)); rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / matrix_l1_norm(symmUp_inverse); rcond_est = cholup.rcond(); VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); MatrixType neg = -symmLo; chollo.compute(neg); VERIFY(neg.size()==0 || chollo.info()==NumericalIssue); VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU())); VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL())); VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU())); VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL())); // test some special use cases of SelfCwiseBinaryOp: MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols); m2 = m1; m2 += symmLo.template selfadjointView().llt().solve(matB); VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView().llt().solve(matB)); m2 = m1; m2 -= symmLo.template selfadjointView().llt().solve(matB); VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView().llt().solve(matB)); m2 = m1; m2.noalias() += symmLo.template selfadjointView().llt().solve(matB); VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView().llt().solve(matB)); m2 = m1; m2.noalias() -= symmLo.template selfadjointView().llt().solve(matB); VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView().llt().solve(matB)); } // LDLT { STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); int sign = internal::random()%2 ? 
1 : -1; if(sign == -1) { symm = -symm; // test a negative matrix } SquareMatrixType symmUp = symm.template triangularView(); SquareMatrixType symmLo = symm.template triangularView(); LDLT ldltlo(symmLo); VERIFY(ldltlo.info()==Success); VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); check_solverbase(symm, ldltlo, rows, rows, 1); check_solverbase(symm, ldltlo, rows, cols, rows); const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / matrix_l1_norm(symmLo_inverse); RealScalar rcond_est = ldltlo.rcond(); // Verify that the estimated condition number is within a factor of 10 of the // truth. VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); LDLT ldltup(symmUp); VERIFY(ldltup.info()==Success); VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix()); vecX = ldltup.solve(vecB); VERIFY_IS_APPROX(symm * vecX, vecB); matX = ldltup.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); // Verify that the estimated condition number is within a factor of 10 of the // truth. 
const MatrixType symmUp_inverse = ldltup.solve(MatrixType::Identity(rows,cols)); rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / matrix_l1_norm(symmUp_inverse); rcond_est = ldltup.rcond(); VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU())); VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL())); VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU())); VERIFY_IS_APPROX(MatrixType(ldltup.matrixU().transpose().conjugate()), MatrixType(ldltup.matrixL())); if(MatrixType::RowsAtCompileTime==Dynamic) { // note : each inplace permutation requires a small temporary vector (mask) // check inplace solve matX = matB; VERIFY_EVALUATION_COUNT(matX = ldltlo.solve(matX), 0); VERIFY_IS_APPROX(matX, ldltlo.solve(matB).eval()); matX = matB; VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0); VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval()); } // restore if(sign == -1) symm = -symm; // check matrices coming from linear constraints with Lagrange multipliers if(rows>=3) { SquareMatrixType A = symm; Index c = internal::random(0,rows-2); A.bottomRightCorner(c,c).setZero(); // Make sure a solution exists: vecX.setRandom(); vecB = A * vecX; vecX.setZero(); ldltlo.compute(A); VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); vecX = ldltlo.solve(vecB); VERIFY_IS_APPROX(A * vecX, vecB); } // check non-full rank matrices if(rows>=3) { Index r = internal::random(1,rows-1); Matrix a = Matrix::Random(rows,r); SquareMatrixType A = a * a.adjoint(); // Make sure a solution exists: vecX.setRandom(); vecB = A * vecX; vecX.setZero(); ldltlo.compute(A); VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); vecX = ldltlo.solve(vecB); VERIFY_IS_APPROX(A * vecX, vecB); } // check matrices with a wide spectrum if(rows>=3) { using std::pow; using std::sqrt; RealScalar s = 
(std::min)(16,std::numeric_limits::max_exponent10/8); Matrix a = Matrix::Random(rows,rows); Matrix d = Matrix::Random(rows); for(Index k=0; k(-s,s)); SquareMatrixType A = a * d.asDiagonal() * a.adjoint(); // Make sure a solution exists: vecX.setRandom(); vecB = A * vecX; vecX.setZero(); ldltlo.compute(A); VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); vecX = ldltlo.solve(vecB); if(ldltlo.vectorD().real().cwiseAbs().minCoeff()>RealScalar(0)) { VERIFY_IS_APPROX(A * vecX,vecB); } else { RealScalar large_tol = sqrt(test_precision()); VERIFY((A * vecX).isApprox(vecB, large_tol)); ++g_test_level; VERIFY_IS_APPROX(A * vecX,vecB); --g_test_level; } } } // update/downdate CALL_SUBTEST(( test_chol_update(symm) )); CALL_SUBTEST(( test_chol_update(symm) )); } template void cholesky_cplx(const MatrixType& m) { // classic test cholesky(m); // test mixing real/scalar types Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix RealMatrixType; typedef Matrix VectorType; RealMatrixType a0 = RealMatrixType::Random(rows,cols); VectorType vecB = VectorType::Random(rows), vecX(rows); MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols); RealMatrixType symm = a0 * a0.adjoint(); // let's make sure the matrix is not singular or near singular for (int k=0; k<3; ++k) { RealMatrixType a1 = RealMatrixType::Random(rows,cols); symm += a1 * a1.adjoint(); } { RealMatrixType symmLo = symm.template triangularView(); LLT chollo(symmLo); VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); check_solverbase(symm, chollo, rows, rows, 1); //check_solverbase(symm, chollo, rows, cols, rows); } // LDLT { int sign = internal::random()%2 ? 
1 : -1; if(sign == -1) { symm = -symm; // test a negative matrix } RealMatrixType symmLo = symm.template triangularView(); LDLT ldltlo(symmLo); VERIFY(ldltlo.info()==Success); VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); check_solverbase(symm, ldltlo, rows, rows, 1); //check_solverbase(symm, ldltlo, rows, cols, rows); } } // regression test for bug 241 template void cholesky_bug241(const MatrixType& m) { eigen_assert(m.rows() == 2 && m.cols() == 2); typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; MatrixType matA; matA << 1, 1, 1, 1; VectorType vecB; vecB << 1, 1; VectorType vecX = matA.ldlt().solve(vecB); VERIFY_IS_APPROX(matA * vecX, vecB); } // LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal. // This test checks that LDLT reports correctly that matrix is indefinite. // See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736 template void cholesky_definiteness(const MatrixType& m) { eigen_assert(m.rows() == 2 && m.cols() == 2); MatrixType mat; LDLT ldlt(2); { mat << 1, 0, 0, -1; ldlt.compute(mat); VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 1, 2, 2, 1; ldlt.compute(mat); VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 0, 0, 0, 0; ldlt.compute(mat); VERIFY(ldlt.info()==Success); VERIFY(ldlt.isNegative()); VERIFY(ldlt.isPositive()); VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << 0, 0, 0, 1; ldlt.compute(mat); VERIFY(ldlt.info()==Success); VERIFY(!ldlt.isNegative()); VERIFY(ldlt.isPositive()); VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } { mat << -1, 0, 0, 0; ldlt.compute(mat); VERIFY(ldlt.info()==Success); VERIFY(ldlt.isNegative()); VERIFY(!ldlt.isPositive()); VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); } } template void cholesky_faillure_cases() 
{ MatrixXd mat; LDLT ldlt; { mat.resize(2,2); mat << 0, 1, 1, 0; ldlt.compute(mat); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); VERIFY(ldlt.info()==NumericalIssue); } #if (!EIGEN_ARCH_i386) || defined(EIGEN_VECTORIZE_SSE2) { mat.resize(3,3); mat << -1, -3, 3, -3, -8.9999999999999999999, 1, 3, 1, 0; ldlt.compute(mat); VERIFY(ldlt.info()==NumericalIssue); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); } #endif { mat.resize(3,3); mat << 1, 2, 3, 2, 4, 1, 3, 1, 0; ldlt.compute(mat); VERIFY(ldlt.info()==NumericalIssue); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); } { mat.resize(8,8); mat << 0.1, 0, -0.1, 0, 0, 0, 1, 0, 0, 4.24667, 0, 2.00333, 0, 0, 0, 0, -0.1, 0, 0.2, 0, -0.1, 0, 0, 0, 0, 2.00333, 0, 8.49333, 0, 2.00333, 0, 0, 0, 0, -0.1, 0, 0.1, 0, 0, 1, 0, 0, 0, 2.00333, 0, 4.24667, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0; ldlt.compute(mat); VERIFY(ldlt.info()==NumericalIssue); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); } // bug 1479 { mat.resize(4,4); mat << 1, 2, 0, 1, 2, 4, 0, 2, 0, 0, 0, 1, 1, 2, 1, 1; ldlt.compute(mat); VERIFY(ldlt.info()==NumericalIssue); VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); } } template void cholesky_verify_assert() { MatrixType tmp; LLT llt; VERIFY_RAISES_ASSERT(llt.matrixL()) VERIFY_RAISES_ASSERT(llt.matrixU()) VERIFY_RAISES_ASSERT(llt.solve(tmp)) VERIFY_RAISES_ASSERT(llt.transpose().solve(tmp)) VERIFY_RAISES_ASSERT(llt.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(llt.solveInPlace(tmp)) LDLT ldlt; VERIFY_RAISES_ASSERT(ldlt.matrixL()) VERIFY_RAISES_ASSERT(ldlt.transpositionsP()) VERIFY_RAISES_ASSERT(ldlt.vectorD()) VERIFY_RAISES_ASSERT(ldlt.isPositive()) VERIFY_RAISES_ASSERT(ldlt.isNegative()) VERIFY_RAISES_ASSERT(ldlt.solve(tmp)) VERIFY_RAISES_ASSERT(ldlt.transpose().solve(tmp)) VERIFY_RAISES_ASSERT(ldlt.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(ldlt.solveInPlace(tmp)) } EIGEN_DECLARE_TEST(cholesky) { int s = 0; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( 
cholesky(Matrix()) ); CALL_SUBTEST_3( cholesky(Matrix2d()) ); CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) ); CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) ); CALL_SUBTEST_4( cholesky(Matrix3f()) ); CALL_SUBTEST_5( cholesky(Matrix4d()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } // empty matrix, regression test for Bug 785: CALL_SUBTEST_2( cholesky(MatrixXd(0,0)) ); // This does not work yet: // CALL_SUBTEST_2( cholesky(Matrix()) ); CALL_SUBTEST_4( cholesky_verify_assert() ); CALL_SUBTEST_7( cholesky_verify_assert() ); CALL_SUBTEST_8( cholesky_verify_assert() ); CALL_SUBTEST_2( cholesky_verify_assert() ); // Test problem size constructors CALL_SUBTEST_9( LLT(10) ); CALL_SUBTEST_9( LDLT(10) ); CALL_SUBTEST_2( cholesky_faillure_cases() ); TEST_SET_BUT_UNUSED_VARIABLE(nb_temporaries) } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/cholmod_support.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS #include "sparse_solver.h" #include template void test_cholmod_ST() { CholmodDecomposition g_chol_colmajor_lower; g_chol_colmajor_lower.setMode(CholmodSupernodalLLt); CholmodDecomposition g_chol_colmajor_upper; g_chol_colmajor_upper.setMode(CholmodSupernodalLLt); CholmodDecomposition g_llt_colmajor_lower; g_llt_colmajor_lower.setMode(CholmodSimplicialLLt); CholmodDecomposition g_llt_colmajor_upper; g_llt_colmajor_upper.setMode(CholmodSimplicialLLt); CholmodDecomposition g_ldlt_colmajor_lower; g_ldlt_colmajor_lower.setMode(CholmodLDLt); CholmodDecomposition g_ldlt_colmajor_upper; g_ldlt_colmajor_upper.setMode(CholmodLDLt); CholmodSupernodalLLT chol_colmajor_lower; CholmodSupernodalLLT chol_colmajor_upper; CholmodSimplicialLLT llt_colmajor_lower; CholmodSimplicialLLT llt_colmajor_upper; CholmodSimplicialLDLT ldlt_colmajor_lower; CholmodSimplicialLDLT ldlt_colmajor_upper; check_sparse_spd_solving(g_chol_colmajor_lower); check_sparse_spd_solving(g_chol_colmajor_upper); check_sparse_spd_solving(g_llt_colmajor_lower); check_sparse_spd_solving(g_llt_colmajor_upper); check_sparse_spd_solving(g_ldlt_colmajor_lower); check_sparse_spd_solving(g_ldlt_colmajor_upper); check_sparse_spd_solving(chol_colmajor_lower); check_sparse_spd_solving(chol_colmajor_upper); check_sparse_spd_solving(llt_colmajor_lower); check_sparse_spd_solving(llt_colmajor_upper); check_sparse_spd_solving(ldlt_colmajor_lower); check_sparse_spd_solving(ldlt_colmajor_upper); check_sparse_spd_determinant(chol_colmajor_lower); check_sparse_spd_determinant(chol_colmajor_upper); check_sparse_spd_determinant(llt_colmajor_lower); check_sparse_spd_determinant(llt_colmajor_upper); check_sparse_spd_determinant(ldlt_colmajor_lower); check_sparse_spd_determinant(ldlt_colmajor_upper); } template void test_cholmod_T() { test_cholmod_ST >(); } EIGEN_DECLARE_TEST(cholmod_support) { CALL_SUBTEST_11( (test_cholmod_T()) ); CALL_SUBTEST_12( (test_cholmod_T()) ); CALL_SUBTEST_13( 
(test_cholmod_T()) ); CALL_SUBTEST_14( (test_cholmod_T()) ); CALL_SUBTEST_21( (test_cholmod_T, ColMajor, int >()) ); CALL_SUBTEST_22( (test_cholmod_T, ColMajor, long>()) ); // TODO complex row-major matrices do not work at the moment: // CALL_SUBTEST_23( (test_cholmod_T, RowMajor, int >()) ); // CALL_SUBTEST_24( (test_cholmod_T, RowMajor, long>()) ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/commainitializer.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void test_blocks() { Matrix m_fixed; MatrixXi m_dynamic(M1+M2, N1+N2); Matrix mat11; mat11.setRandom(); Matrix mat12; mat12.setRandom(); Matrix mat21; mat21.setRandom(); Matrix mat22; mat22.setRandom(); MatrixXi matx11 = mat11, matx12 = mat12, matx21 = mat21, matx22 = mat22; { VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat21, matx22).finished(), (m_dynamic << mat11, matx12, mat21, matx22).finished()); VERIFY_IS_EQUAL((m_fixed.template topLeftCorner()), mat11); VERIFY_IS_EQUAL((m_fixed.template topRightCorner()), mat12); VERIFY_IS_EQUAL((m_fixed.template bottomLeftCorner()), mat21); VERIFY_IS_EQUAL((m_fixed.template bottomRightCorner()), mat22); VERIFY_IS_EQUAL((m_fixed << mat12, mat11, matx21, mat22).finished(), (m_dynamic << mat12, matx11, matx21, mat22).finished()); } if(N1 > 0) { if(M1 > 0) { VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22)); } if(M2 > 0) { VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22)); } } else { // allow insertion of zero-column blocks: VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat11, mat11, mat21, mat21, mat22).finished(), 
(m_dynamic << mat12, mat22).finished()); } if(M1 != M2) { VERIFY_RAISES_ASSERT((m_fixed << mat11, mat21, mat12, mat22)); } } template struct test_block_recursion { static void run() { test_block_recursion::run(); test_block_recursion::run(); } }; template struct test_block_recursion<0,N> { static void run() { test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>(); } }; void test_basics() { Matrix3d m3; Matrix4d m4; VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) ); #ifndef _MSC_VER VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ); #endif double data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; Matrix3d ref = Map >(data); m3 = Matrix3d::Random(); m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9; VERIFY_IS_APPROX(m3, ref ); Vector3d vec[3]; vec[0] << 1, 4, 7; vec[1] << 2, 5, 8; vec[2] << 3, 6, 9; m3 = Matrix3d::Random(); m3 << vec[0], vec[1], vec[2]; VERIFY_IS_APPROX(m3, ref); vec[0] << 1, 2, 3; vec[1] << 4, 5, 6; vec[2] << 7, 8, 9; m3 = Matrix3d::Random(); m3 << vec[0].transpose(), 4, 5, 6, vec[2].transpose(); VERIFY_IS_APPROX(m3, ref); } EIGEN_DECLARE_TEST(commainitializer) { CALL_SUBTEST_1(test_basics()); // recursively test all block-sizes from 0 to 3: CALL_SUBTEST_2(test_block_recursion<8>::run()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/conjugate_gradient.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "sparse_solver.h" #include template void test_conjugate_gradient_T() { typedef SparseMatrix SparseMatrixType; ConjugateGradient cg_colmajor_lower_diag; ConjugateGradient cg_colmajor_upper_diag; ConjugateGradient cg_colmajor_loup_diag; ConjugateGradient cg_colmajor_lower_I; ConjugateGradient cg_colmajor_upper_I; CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_loup_diag) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) ); } EIGEN_DECLARE_TEST(conjugate_gradient) { CALL_SUBTEST_1(( test_conjugate_gradient_T() )); CALL_SUBTEST_2(( test_conjugate_gradient_T, int>() )); CALL_SUBTEST_3(( test_conjugate_gradient_T() )); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/conservative_resize.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include "AnnoyingScalar.h" using namespace Eigen; template void run_matrix_tests() { typedef Matrix MatrixType; MatrixType m, n; // boundary cases ... m = n = MatrixType::Random(50,50); m.conservativeResize(1,50); VERIFY_IS_APPROX(m, n.block(0,0,1,50)); m = n = MatrixType::Random(50,50); m.conservativeResize(50,1); VERIFY_IS_APPROX(m, n.block(0,0,50,1)); m = n = MatrixType::Random(50,50); m.conservativeResize(50,50); VERIFY_IS_APPROX(m, n.block(0,0,50,50)); // random shrinking ... 
for (int i=0; i<25; ++i) { const Index rows = internal::random(1,50); const Index cols = internal::random(1,50); m = n = MatrixType::Random(50,50); m.conservativeResize(rows,cols); VERIFY_IS_APPROX(m, n.block(0,0,rows,cols)); } // random growing with zeroing ... for (int i=0; i<25; ++i) { const Index rows = internal::random(50,75); const Index cols = internal::random(50,75); m = n = MatrixType::Random(50,50); m.conservativeResizeLike(MatrixType::Zero(rows,cols)); VERIFY_IS_APPROX(m.block(0,0,n.rows(),n.cols()), n); VERIFY( rows<=50 || m.block(50,0,rows-50,cols).sum() == Scalar(0) ); VERIFY( cols<=50 || m.block(0,50,rows,cols-50).sum() == Scalar(0) ); } } template void run_vector_tests() { typedef Matrix VectorType; VectorType m, n; // boundary cases ... m = n = VectorType::Random(50); m.conservativeResize(1); VERIFY_IS_APPROX(m, n.segment(0,1)); m = n = VectorType::Random(50); m.conservativeResize(50); VERIFY_IS_APPROX(m, n.segment(0,50)); m = n = VectorType::Random(50); m.conservativeResize(m.rows(),1); VERIFY_IS_APPROX(m, n.segment(0,1)); m = n = VectorType::Random(50); m.conservativeResize(m.rows(),50); VERIFY_IS_APPROX(m, n.segment(0,50)); // random shrinking ... for (int i=0; i<50; ++i) { const int size = internal::random(1,50); m = n = VectorType::Random(50); m.conservativeResize(size); VERIFY_IS_APPROX(m, n.segment(0,size)); m = n = VectorType::Random(50); m.conservativeResize(m.rows(), size); VERIFY_IS_APPROX(m, n.segment(0,size)); } // random growing with zeroing ... 
for (int i=0; i<50; ++i) { const int size = internal::random(50,100); m = n = VectorType::Random(50); m.conservativeResizeLike(VectorType::Zero(size)); VERIFY_IS_APPROX(m.segment(0,50), n); VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) ); m = n = VectorType::Random(50); m.conservativeResizeLike(Matrix::Zero(1,size)); VERIFY_IS_APPROX(m.segment(0,50), n); VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) ); } } // Basic memory leak check with a non-copyable scalar type template void noncopyable() { typedef Eigen::Matrix VectorType; typedef Eigen::Matrix MatrixType; { #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW AnnoyingScalar::dont_throw = true; #endif int n = 50; VectorType v0(n), v1(n); MatrixType m0(n,n), m1(n,n), m2(n,n); v0.setOnes(); v1.setOnes(); m0.setOnes(); m1.setOnes(); m2.setOnes(); VERIFY(m0==m1); m0.conservativeResize(2*n,2*n); VERIFY(m0.topLeftCorner(n,n) == m1); VERIFY(v0.head(n) == v1); v0.conservativeResize(2*n); VERIFY(v0.head(n) == v1); } VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in noncopyable"); } EIGEN_DECLARE_TEST(conservative_resize) { for(int i=0; i())); CALL_SUBTEST_1((run_matrix_tests())); CALL_SUBTEST_2((run_matrix_tests())); CALL_SUBTEST_2((run_matrix_tests())); CALL_SUBTEST_3((run_matrix_tests())); CALL_SUBTEST_3((run_matrix_tests())); CALL_SUBTEST_4((run_matrix_tests, Eigen::RowMajor>())); CALL_SUBTEST_4((run_matrix_tests, Eigen::ColMajor>())); CALL_SUBTEST_5((run_matrix_tests, Eigen::RowMajor>())); CALL_SUBTEST_5((run_matrix_tests, Eigen::ColMajor>())); CALL_SUBTEST_1((run_matrix_tests())); CALL_SUBTEST_1((run_vector_tests())); CALL_SUBTEST_2((run_vector_tests())); CALL_SUBTEST_3((run_vector_tests())); CALL_SUBTEST_4((run_vector_tests >())); CALL_SUBTEST_5((run_vector_tests >())); #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW AnnoyingScalar::dont_throw = true; #endif CALL_SUBTEST_6(( run_vector_tests() )); CALL_SUBTEST_6(( noncopyable<0>() )); } } 
================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/constructor.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2017 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define TEST_ENABLE_TEMPORARY_TRACKING #include "main.h" template struct Wrapper { MatrixType m_mat; inline Wrapper(const MatrixType &x) : m_mat(x) {} inline operator const MatrixType& () const { return m_mat; } inline operator MatrixType& () { return m_mat; } }; enum my_sizes { M = 12, N = 7}; template void ctor_init1(const MatrixType& m) { // Check logic in PlainObjectBase::_init1 Index rows = m.rows(); Index cols = m.cols(); MatrixType m0 = MatrixType::Random(rows,cols); VERIFY_EVALUATION_COUNT( MatrixType m1(m0), 1); VERIFY_EVALUATION_COUNT( MatrixType m2(m0+m0), 1); VERIFY_EVALUATION_COUNT( MatrixType m2(m0.block(0,0,rows,cols)) , 1); Wrapper wrapper(m0); VERIFY_EVALUATION_COUNT( MatrixType m3(wrapper) , 1); } EIGEN_DECLARE_TEST(constructor) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( ctor_init1(Matrix()) ); CALL_SUBTEST_1( ctor_init1(Matrix4d()) ); CALL_SUBTEST_1( ctor_init1(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( ctor_init1(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } { Matrix a(123); VERIFY_IS_EQUAL(a[0], 123); } { Matrix a(123.0); VERIFY_IS_EQUAL(a[0], 123); } { Matrix a(123); VERIFY_IS_EQUAL(a[0], 123.f); } { Array a(123); VERIFY_IS_EQUAL(a[0], 123); } { Array a(123.0); VERIFY_IS_EQUAL(a[0], 123); } { Array a(123); VERIFY_IS_EQUAL(a[0], 123.f); } { Array a(123); VERIFY_IS_EQUAL(a(4), 123); } { Array a(123.0); 
VERIFY_IS_EQUAL(a(4), 123); } { Array a(123); VERIFY_IS_EQUAL(a(4), 123.f); } { MatrixXi m1(M,N); VERIFY_IS_EQUAL(m1.rows(),M); VERIFY_IS_EQUAL(m1.cols(),N); ArrayXXi a1(M,N); VERIFY_IS_EQUAL(a1.rows(),M); VERIFY_IS_EQUAL(a1.cols(),N); VectorXi v1(M); VERIFY_IS_EQUAL(v1.size(),M); ArrayXi a2(M); VERIFY_IS_EQUAL(a2.size(),M); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/corners.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #define COMPARE_CORNER(A,B) \ VERIFY_IS_EQUAL(matrix.A, matrix.B); \ VERIFY_IS_EQUAL(const_matrix.A, const_matrix.B); template void corners(const MatrixType& m) { Index rows = m.rows(); Index cols = m.cols(); Index r = internal::random(1,rows); Index c = internal::random(1,cols); MatrixType matrix = MatrixType::Random(rows,cols); const MatrixType const_matrix = MatrixType::Random(rows,cols); COMPARE_CORNER(topLeftCorner(r,c), block(0,0,r,c)); COMPARE_CORNER(topRightCorner(r,c), block(0,cols-c,r,c)); COMPARE_CORNER(bottomLeftCorner(r,c), block(rows-r,0,r,c)); COMPARE_CORNER(bottomRightCorner(r,c), block(rows-r,cols-c,r,c)); Index sr = internal::random(1,rows) - 1; Index nr = internal::random(1,rows-sr); Index sc = internal::random(1,cols) - 1; Index nc = internal::random(1,cols-sc); COMPARE_CORNER(topRows(r), block(0,0,r,cols)); COMPARE_CORNER(middleRows(sr,nr), block(sr,0,nr,cols)); COMPARE_CORNER(bottomRows(r), block(rows-r,0,r,cols)); COMPARE_CORNER(leftCols(c), block(0,0,rows,c)); COMPARE_CORNER(middleCols(sc,nc), block(0,sc,rows,nc)); COMPARE_CORNER(rightCols(c), block(0,cols-c,rows,c)); } template void corners_fixedsize() 
{ MatrixType matrix = MatrixType::Random(); const MatrixType const_matrix = MatrixType::Random(); enum { rows = MatrixType::RowsAtCompileTime, cols = MatrixType::ColsAtCompileTime, r = CRows, c = CCols, sr = SRows, sc = SCols }; VERIFY_IS_EQUAL((matrix.template topLeftCorner()), (matrix.template block(0,0))); VERIFY_IS_EQUAL((matrix.template topRightCorner()), (matrix.template block(0,cols-c))); VERIFY_IS_EQUAL((matrix.template bottomLeftCorner()), (matrix.template block(rows-r,0))); VERIFY_IS_EQUAL((matrix.template bottomRightCorner()), (matrix.template block(rows-r,cols-c))); VERIFY_IS_EQUAL((matrix.template topLeftCorner()), (matrix.template topLeftCorner(r,c))); VERIFY_IS_EQUAL((matrix.template topRightCorner()), (matrix.template topRightCorner(r,c))); VERIFY_IS_EQUAL((matrix.template bottomLeftCorner()), (matrix.template bottomLeftCorner(r,c))); VERIFY_IS_EQUAL((matrix.template bottomRightCorner()), (matrix.template bottomRightCorner(r,c))); VERIFY_IS_EQUAL((matrix.template topLeftCorner()), (matrix.template topLeftCorner(r,c))); VERIFY_IS_EQUAL((matrix.template topRightCorner()), (matrix.template topRightCorner(r,c))); VERIFY_IS_EQUAL((matrix.template bottomLeftCorner()), (matrix.template bottomLeftCorner(r,c))); VERIFY_IS_EQUAL((matrix.template bottomRightCorner()), (matrix.template bottomRightCorner(r,c))); VERIFY_IS_EQUAL((matrix.template topRows()), (matrix.template block(0,0))); VERIFY_IS_EQUAL((matrix.template middleRows(sr)), (matrix.template block(sr,0))); VERIFY_IS_EQUAL((matrix.template bottomRows()), (matrix.template block(rows-r,0))); VERIFY_IS_EQUAL((matrix.template leftCols()), (matrix.template block(0,0))); VERIFY_IS_EQUAL((matrix.template middleCols(sc)), (matrix.template block(0,sc))); VERIFY_IS_EQUAL((matrix.template rightCols()), (matrix.template block(0,cols-c))); VERIFY_IS_EQUAL((const_matrix.template topLeftCorner()), (const_matrix.template block(0,0))); VERIFY_IS_EQUAL((const_matrix.template topRightCorner()), (const_matrix.template 
block(0,cols-c))); VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner()), (const_matrix.template block(rows-r,0))); VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner()), (const_matrix.template block(rows-r,cols-c))); VERIFY_IS_EQUAL((const_matrix.template topLeftCorner()), (const_matrix.template topLeftCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template topRightCorner()), (const_matrix.template topRightCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner()), (const_matrix.template bottomLeftCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner()), (const_matrix.template bottomRightCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template topLeftCorner()), (const_matrix.template topLeftCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template topRightCorner()), (const_matrix.template topRightCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner()), (const_matrix.template bottomLeftCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner()), (const_matrix.template bottomRightCorner(r,c))); VERIFY_IS_EQUAL((const_matrix.template topRows()), (const_matrix.template block(0,0))); VERIFY_IS_EQUAL((const_matrix.template middleRows(sr)), (const_matrix.template block(sr,0))); VERIFY_IS_EQUAL((const_matrix.template bottomRows()), (const_matrix.template block(rows-r,0))); VERIFY_IS_EQUAL((const_matrix.template leftCols()), (const_matrix.template block(0,0))); VERIFY_IS_EQUAL((const_matrix.template middleCols(sc)), (const_matrix.template block(0,sc))); VERIFY_IS_EQUAL((const_matrix.template rightCols()), (const_matrix.template block(0,cols-c))); } EIGEN_DECLARE_TEST(corners) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( corners(Matrix()) ); CALL_SUBTEST_2( corners(Matrix4d()) ); CALL_SUBTEST_3( corners(Matrix()) ); CALL_SUBTEST_4( corners(MatrixXcf(5, 7)) ); CALL_SUBTEST_5( corners(MatrixXf(21, 20)) ); CALL_SUBTEST_1(( corners_fixedsize, 1, 1, 0, 0>() )); CALL_SUBTEST_2(( corners_fixedsize() )); 
CALL_SUBTEST_3(( corners_fixedsize,4,7,5,2>() )); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/ctorleak.cpp ================================================ #include "main.h" #include // std::exception struct Foo { static Index object_count; static Index object_limit; int dummy; Foo() : dummy(0) { #ifdef EIGEN_EXCEPTIONS // TODO: Is this the correct way to handle this? if (Foo::object_count > Foo::object_limit) { std::cout << "\nThrow!\n"; throw Foo::Fail(); } #endif std::cout << '+'; ++Foo::object_count; } ~Foo() { std::cout << '-'; --Foo::object_count; } class Fail : public std::exception {}; }; Index Foo::object_count = 0; Index Foo::object_limit = 0; #undef EIGEN_TEST_MAX_SIZE #define EIGEN_TEST_MAX_SIZE 3 EIGEN_DECLARE_TEST(ctorleak) { typedef Matrix MatrixX; typedef Matrix VectorX; Foo::object_count = 0; for(int i = 0; i < g_repeat; i++) { Index rows = internal::random(2,EIGEN_TEST_MAX_SIZE), cols = internal::random(2,EIGEN_TEST_MAX_SIZE); Foo::object_limit = rows*cols; { MatrixX r(rows, cols); Foo::object_limit = r.size()+internal::random(0, rows*cols - 2); std::cout << "object_limit =" << Foo::object_limit << std::endl; #ifdef EIGEN_EXCEPTIONS try { #endif if(internal::random()) { std::cout << "\nMatrixX m(" << rows << ", " << cols << ");\n"; MatrixX m(rows, cols); } else { std::cout << "\nMatrixX m(r);\n"; MatrixX m(r); } #ifdef EIGEN_EXCEPTIONS VERIFY(false); // not reached if exceptions are enabled } catch (const Foo::Fail&) { /* ignore */ } #endif } VERIFY_IS_EQUAL(Index(0), Foo::object_count); { Foo::object_limit = (rows+1)*(cols+1); MatrixX A(rows, cols); VERIFY_IS_EQUAL(Foo::object_count, rows*cols); VectorX v=A.row(0); VERIFY_IS_EQUAL(Foo::object_count, (rows+1)*cols); v = A.col(0); VERIFY_IS_EQUAL(Foo::object_count, rows*(cols+1)); } VERIFY_IS_EQUAL(Index(0), Foo::object_count); } std::cout << "\n"; } ================================================ FILE: 
extensions/ngp_raymarch/include/op_include/eigen/test/denseLM.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire Nuentsa // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include #include #include "main.h" #include using namespace std; using namespace Eigen; template struct DenseLM : DenseFunctor { typedef DenseFunctor Base; typedef typename Base::JacobianType JacobianType; typedef Matrix VectorType; DenseLM(int n, int m) : DenseFunctor(n,m) { } VectorType model(const VectorType& uv, VectorType& x) { VectorType y; // Should change to use expression template int m = Base::values(); int n = Base::inputs(); eigen_assert(uv.size()%2 == 0); eigen_assert(uv.size() == n); eigen_assert(x.size() == m); y.setZero(m); int half = n/2; VectorBlock u(uv, 0, half); VectorBlock v(uv, half, half); for (int j = 0; j < m; j++) { for (int i = 0; i < half; i++) y(j) += u(i)*std::exp(-(x(j)-i)*(x(j)-i)/(v(i)*v(i))); } return y; } void initPoints(VectorType& uv_ref, VectorType& x) { m_x = x; m_y = this->model(uv_ref, x); } int operator()(const VectorType& uv, VectorType& fvec) { int m = Base::values(); int n = Base::inputs(); eigen_assert(uv.size()%2 == 0); eigen_assert(uv.size() == n); eigen_assert(fvec.size() == m); int half = n/2; VectorBlock u(uv, 0, half); VectorBlock v(uv, half, half); for (int j = 0; j < m; j++) { fvec(j) = m_y(j); for (int i = 0; i < half; i++) { fvec(j) -= u(i) *std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); } } return 0; } int df(const VectorType& uv, JacobianType& fjac) { int m = Base::values(); int n = Base::inputs(); eigen_assert(n == uv.size()); eigen_assert(fjac.rows() == m); eigen_assert(fjac.cols() == n); int half = n/2; VectorBlock 
u(uv, 0, half); VectorBlock v(uv, half, half); for (int j = 0; j < m; j++) { for (int i = 0; i < half; i++) { fjac.coeffRef(j,i) = -std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); fjac.coeffRef(j,i+half) = -2.*u(i)*(m_x(j)-i)*(m_x(j)-i)/(std::pow(v(i),3)) * std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); } } return 0; } VectorType m_x, m_y; //Data Points }; template int test_minimizeLM(FunctorType& functor, VectorType& uv) { LevenbergMarquardt lm(functor); LevenbergMarquardtSpace::Status info; info = lm.minimize(uv); VERIFY_IS_EQUAL(info, 1); //FIXME Check other parameters return info; } template int test_lmder(FunctorType& functor, VectorType& uv) { typedef typename VectorType::Scalar Scalar; LevenbergMarquardtSpace::Status info; LevenbergMarquardt lm(functor); info = lm.lmder1(uv); VERIFY_IS_EQUAL(info, 1); //FIXME Check other parameters return info; } template int test_minimizeSteps(FunctorType& functor, VectorType& uv) { LevenbergMarquardtSpace::Status info; LevenbergMarquardt lm(functor); info = lm.minimizeInit(uv); if (info==LevenbergMarquardtSpace::ImproperInputParameters) return info; do { info = lm.minimizeOneStep(uv); } while (info==LevenbergMarquardtSpace::Running); VERIFY_IS_EQUAL(info, 1); //FIXME Check other parameters return info; } template void test_denseLM_T() { typedef Matrix VectorType; int inputs = 10; int values = 1000; DenseLM dense_gaussian(inputs, values); VectorType uv(inputs),uv_ref(inputs); VectorType x(values); // Generate the reference solution uv_ref << -2, 1, 4 ,8, 6, 1.8, 1.2, 1.1, 1.9 , 3; //Generate the reference data points x.setRandom(); x = 10*x; x.array() += 10; dense_gaussian.initPoints(uv_ref, x); // Generate the initial parameters VectorBlock u(uv, 0, inputs/2); VectorBlock v(uv, inputs/2, inputs/2); // Solve the optimization problem //Solve in one go u.setOnes(); v.setOnes(); test_minimizeLM(dense_gaussian, uv); //Solve until the machine precision u.setOnes(); v.setOnes(); test_lmder(dense_gaussian, uv); // Solve step by step 
v.setOnes(); u.setOnes(); test_minimizeSteps(dense_gaussian, uv); } EIGEN_DECLARE_TEST(denseLM) { CALL_SUBTEST_2(test_denseLM_T()); // CALL_SUBTEST_2(test_sparseLM_T()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/dense_storage.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2013 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include "AnnoyingScalar.h" #include "SafeScalar.h" #include #if EIGEN_HAS_TYPE_TRAITS && EIGEN_HAS_CXX11 using DenseStorageD3x3 = Eigen::DenseStorage; static_assert(std::is_trivially_move_constructible::value, "DenseStorage not trivially_move_constructible"); static_assert(std::is_trivially_move_assignable::value, "DenseStorage not trivially_move_assignable"); #if !defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN) static_assert(std::is_trivially_copy_constructible::value, "DenseStorage not trivially_copy_constructible"); static_assert(std::is_trivially_copy_assignable::value, "DenseStorage not trivially_copy_assignable"); static_assert(std::is_trivially_copyable::value, "DenseStorage not trivially_copyable"); #endif #endif template void dense_storage_copy(int rows, int cols) { typedef DenseStorage DenseStorageType; const int size = rows*cols; DenseStorageType reference(size, rows, cols); T* raw_reference = reference.data(); for (int i=0; i(i); DenseStorageType copied_reference(reference); const T* raw_copied_reference = copied_reference.data(); for (int i=0; i void dense_storage_assignment(int rows, int cols) { typedef DenseStorage DenseStorageType; const int size = rows*cols; DenseStorageType reference(size, rows, cols); T* raw_reference = reference.data(); for (int i=0; i(i); 
DenseStorageType copied_reference; copied_reference = reference; const T* raw_copied_reference = copied_reference.data(); for (int i=0; i void dense_storage_swap(int rows0, int cols0, int rows1, int cols1) { typedef DenseStorage DenseStorageType; const int size0 = rows0*cols0; DenseStorageType a(size0, rows0, cols0); for (int i=0; i(i); } const int size1 = rows1*cols1; DenseStorageType b(size1, rows1, cols1); for (int i=0; i(-i); } a.swap(b); for (int i=0; i(i)); } for (int i=0; i(-i)); } } template void dense_storage_alignment() { #if EIGEN_HAS_ALIGNAS struct alignas(Alignment) Empty1 {}; VERIFY_IS_EQUAL(std::alignment_of::value, Alignment); struct EIGEN_ALIGN_TO_BOUNDARY(Alignment) Empty2 {}; VERIFY_IS_EQUAL(std::alignment_of::value, Alignment); struct Nested1 { EIGEN_ALIGN_TO_BOUNDARY(Alignment) T data[Size]; }; VERIFY_IS_EQUAL(std::alignment_of::value, Alignment); VERIFY_IS_EQUAL( (std::alignment_of >::value), Alignment); const std::size_t default_alignment = internal::compute_default_alignment::value; VERIFY_IS_EQUAL( (std::alignment_of >::value), default_alignment); VERIFY_IS_EQUAL( (std::alignment_of >::value), default_alignment); struct Nested2 { Matrix mat; }; VERIFY_IS_EQUAL(std::alignment_of::value, default_alignment); #endif } template void dense_storage_tests() { // Dynamic Storage. dense_storage_copy(4, 3); dense_storage_copy(4, 3); dense_storage_copy(4, 3); // Fixed Storage. dense_storage_copy(4, 3); dense_storage_copy(4, 3); dense_storage_copy(4, 3); dense_storage_copy(4, 3); // Fixed Storage with Uninitialized Elements. dense_storage_copy(4, 3); dense_storage_copy(4, 3); dense_storage_copy(4, 3); // Dynamic Storage. dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); // Fixed Storage. dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); // Fixed Storage with Uninitialized Elements. 
dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); dense_storage_assignment(4, 3); // Dynamic Storage. dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 1); dense_storage_swap(2, 1, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 3); dense_storage_swap(2, 3, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 4, 1); dense_storage_swap(4, 1, 4, 3); // Fixed Storage. dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 1); dense_storage_swap(2, 1, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 4, 1); dense_storage_swap(4, 1, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 3); dense_storage_swap(2, 3, 4, 3); // Fixed Storage with Uninitialized Elements. dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 1); dense_storage_swap(2, 1, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 4, 1); dense_storage_swap(4, 1, 4, 3); dense_storage_swap(4, 3, 4, 3); dense_storage_swap(4, 3, 2, 3); dense_storage_swap(2, 3, 4, 3); dense_storage_alignment(); dense_storage_alignment(); dense_storage_alignment(); dense_storage_alignment(); } EIGEN_DECLARE_TEST(dense_storage) { dense_storage_tests(); dense_storage_tests(); dense_storage_tests >(); dense_storage_tests(); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/determinant.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include template void determinant(const MatrixType& m) { /* this test covers the following files: Determinant.h */ Index size = m.rows(); MatrixType m1(size, size), m2(size, size); m1.setRandom(); m2.setRandom(); typedef typename MatrixType::Scalar Scalar; Scalar x = internal::random(); VERIFY_IS_APPROX(MatrixType::Identity(size, size).determinant(), Scalar(1)); VERIFY_IS_APPROX((m1*m2).eval().determinant(), m1.determinant() * m2.determinant()); if(size==1) return; Index i = internal::random(0, size-1); Index j; do { j = internal::random(0, size-1); } while(j==i); m2 = m1; m2.row(i).swap(m2.row(j)); VERIFY_IS_APPROX(m2.determinant(), -m1.determinant()); m2 = m1; m2.col(i).swap(m2.col(j)); VERIFY_IS_APPROX(m2.determinant(), -m1.determinant()); VERIFY_IS_APPROX(m2.determinant(), m2.transpose().determinant()); VERIFY_IS_APPROX(numext::conj(m2.determinant()), m2.adjoint().determinant()); m2 = m1; m2.row(i) += x*m2.row(j); VERIFY_IS_APPROX(m2.determinant(), m1.determinant()); m2 = m1; m2.row(i) *= x; VERIFY_IS_APPROX(m2.determinant(), m1.determinant() * x); // check empty matrix VERIFY_IS_APPROX(m2.block(0,0,0,0).determinant(), Scalar(1)); } EIGEN_DECLARE_TEST(determinant) { for(int i = 0; i < g_repeat; i++) { int s = 0; CALL_SUBTEST_1( determinant(Matrix()) ); CALL_SUBTEST_2( determinant(Matrix()) ); CALL_SUBTEST_3( determinant(Matrix()) ); CALL_SUBTEST_4( determinant(Matrix()) ); CALL_SUBTEST_5( determinant(Matrix, 10, 10>()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_6( determinant(MatrixXd(s, s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/diagonal.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void diagonal(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols); Scalar s1 = internal::random(); //check diagonal() VERIFY_IS_APPROX(m1.diagonal(), m1.transpose().diagonal()); m2.diagonal() = 2 * m1.diagonal(); m2.diagonal()[0] *= 3; if (rows>2) { enum { N1 = MatrixType::RowsAtCompileTime>2 ? 2 : 0, N2 = MatrixType::RowsAtCompileTime>1 ? -1 : 0 }; // check sub/super diagonal if(MatrixType::SizeAtCompileTime!=Dynamic) { VERIFY(m1.template diagonal().RowsAtCompileTime == m1.diagonal(N1).size()); VERIFY(m1.template diagonal().RowsAtCompileTime == m1.diagonal(N2).size()); } m2.template diagonal() = 2 * m1.template diagonal(); VERIFY_IS_APPROX(m2.template diagonal(), static_cast(2) * m1.diagonal(N1)); m2.template diagonal()[0] *= 3; VERIFY_IS_APPROX(m2.template diagonal()[0], static_cast(6) * m1.template diagonal()[0]); m2.template diagonal() = 2 * m1.template diagonal(); m2.template diagonal()[0] *= 3; VERIFY_IS_APPROX(m2.template diagonal()[0], static_cast(6) * m1.template diagonal()[0]); m2.diagonal(N1) = 2 * m1.diagonal(N1); VERIFY_IS_APPROX(m2.template diagonal(), static_cast(2) * m1.diagonal(N1)); m2.diagonal(N1)[0] *= 3; VERIFY_IS_APPROX(m2.diagonal(N1)[0], static_cast(6) * m1.diagonal(N1)[0]); m2.diagonal(N2) = 2 * m1.diagonal(N2); VERIFY_IS_APPROX(m2.template diagonal(), static_cast(2) * m1.diagonal(N2)); m2.diagonal(N2)[0] *= 3; VERIFY_IS_APPROX(m2.diagonal(N2)[0], static_cast(6) * m1.diagonal(N2)[0]); m2.diagonal(N2).x() = s1; VERIFY_IS_APPROX(m2.diagonal(N2).x(), s1); m2.diagonal(N2).coeffRef(0) = Scalar(2)*s1; VERIFY_IS_APPROX(m2.diagonal(N2).coeff(0), Scalar(2)*s1); } VERIFY( m1.diagonal( cols).size()==0 ); VERIFY( m1.diagonal(-rows).size()==0 ); } template void 
diagonal_assert(const MatrixType& m) { Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols); if (rows>=2 && cols>=2) { VERIFY_RAISES_ASSERT( m1 += m1.diagonal() ); VERIFY_RAISES_ASSERT( m1 -= m1.diagonal() ); VERIFY_RAISES_ASSERT( m1.array() *= m1.diagonal().array() ); VERIFY_RAISES_ASSERT( m1.array() /= m1.diagonal().array() ); } VERIFY_RAISES_ASSERT( m1.diagonal(cols+1) ); VERIFY_RAISES_ASSERT( m1.diagonal(-(rows+1)) ); } EIGEN_DECLARE_TEST(diagonal) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( diagonal(Matrix()) ); CALL_SUBTEST_1( diagonal(Matrix()) ); CALL_SUBTEST_1( diagonal(Matrix()) ); CALL_SUBTEST_2( diagonal(Matrix4d()) ); CALL_SUBTEST_2( diagonal(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_2( diagonal(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( diagonal(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( diagonal(Matrix(3, 4)) ); CALL_SUBTEST_1( diagonal_assert(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/diagonal_matrix_variadic_ctor.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2019 David Tellenbach // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #define VERIFY_IMPLICIT_CONVERSION_3(DIAGTYPE, V0, V1, V2) \ DIAGTYPE d(V0, V1, V2); \ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); #define VERIFY_IMPLICIT_CONVERSION_4(DIAGTYPE, V0, V1, V2, V3) \ DIAGTYPE d(V0, V1, V2, V3); \ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); #define VERIFY_IMPLICIT_CONVERSION_5(DIAGTYPE, V0, V1, V2, V3, V4) \ DIAGTYPE d(V0, V1, V2, V3, V4); \ DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); \ VERIFY_IS_APPROX(Dense(4, 4), (Scalar)V4); template void constructorTest() { typedef DiagonalMatrix DiagonalMatrix0; typedef DiagonalMatrix DiagonalMatrix3; typedef DiagonalMatrix DiagonalMatrix4; typedef DiagonalMatrix DiagonalMatrixX; Scalar raw[7]; for (int k = 0; k < 7; ++k) raw[k] = internal::random(); // Fixed-sized matrices { DiagonalMatrix0 a {{}}; VERIFY(a.rows() == 0); VERIFY(a.cols() == 0); typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; VERIFY(a.rows() == 3); VERIFY(a.cols() == 3); typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; VERIFY(a.rows() == 4); VERIFY(a.cols() == 4); typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } // dynamically sized matrices { 
DiagonalMatrixX a{{}}; VERIFY(a.rows() == 0); VERIFY(a.rows() == 0); typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; VERIFY(a.rows() == 7); VERIFY(a.rows() == 7); typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } } template<> void constructorTest() { typedef float Scalar; typedef DiagonalMatrix DiagonalMatrix0; typedef DiagonalMatrix DiagonalMatrix3; typedef DiagonalMatrix DiagonalMatrix4; typedef DiagonalMatrix DiagonalMatrix5; typedef DiagonalMatrix DiagonalMatrixX; Scalar raw[7]; for (int k = 0; k < 7; ++k) raw[k] = internal::random(); // Fixed-sized matrices { DiagonalMatrix0 a {{}}; VERIFY(a.rows() == 0); VERIFY(a.cols() == 0); typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; VERIFY(a.rows() == 3); VERIFY(a.cols() == 3); typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; VERIFY(a.rows() == 4); VERIFY(a.cols() == 4); typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } // dynamically sized matrices { DiagonalMatrixX a{{}}; VERIFY(a.rows() == 0); VERIFY(a.rows() == 0); typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; VERIFY(a.rows() == 7); VERIFY(a.rows() == 7); typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); } { 
VERIFY_IMPLICIT_CONVERSION_3(DiagonalMatrix3, 1.2647, 2.56f, -3); } { VERIFY_IMPLICIT_CONVERSION_4(DiagonalMatrix4, 1.2647, 2.56f, -3, 3.23f); } { VERIFY_IMPLICIT_CONVERSION_5(DiagonalMatrix5, 1.2647, 2.56f, -3, 3.23f, 2); } } EIGEN_DECLARE_TEST(diagonal_matrix_variadic_ctor) { CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest()); CALL_SUBTEST_2(constructorTest>()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/diagonalmatrices.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" using namespace std; template void diagonalmatrices(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime }; typedef Matrix VectorType; typedef Matrix RowVectorType; typedef Matrix SquareMatrixType; typedef Matrix DynMatrixType; typedef DiagonalMatrix LeftDiagonalMatrix; typedef DiagonalMatrix RightDiagonalMatrix; typedef Matrix BigMatrix; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols); VectorType v1 = VectorType::Random(rows), v2 = VectorType::Random(rows); RowVectorType rv1 = RowVectorType::Random(cols), rv2 = RowVectorType::Random(cols); LeftDiagonalMatrix ldm1(v1), ldm2(v2); RightDiagonalMatrix rdm1(rv1), rdm2(rv2); Scalar s1 = internal::random(); SquareMatrixType sq_m1 (v1.asDiagonal()); VERIFY_IS_APPROX(sq_m1, v1.asDiagonal().toDenseMatrix()); sq_m1 = v1.asDiagonal(); VERIFY_IS_APPROX(sq_m1, v1.asDiagonal().toDenseMatrix()); SquareMatrixType sq_m2 = v1.asDiagonal(); VERIFY_IS_APPROX(sq_m1, sq_m2); ldm1 = v1.asDiagonal(); LeftDiagonalMatrix ldm3(v1); VERIFY_IS_APPROX(ldm1.diagonal(), ldm3.diagonal()); LeftDiagonalMatrix ldm4 = v1.asDiagonal(); VERIFY_IS_APPROX(ldm1.diagonal(), ldm4.diagonal()); sq_m1.block(0,0,rows,rows) = ldm1; VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix()); sq_m1.transpose() = ldm1; VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix()); Index i = internal::random(0, rows-1); Index j = internal::random(0, cols-1); VERIFY_IS_APPROX( ((ldm1 * m1)(i,j)) , ldm1.diagonal()(i) * m1(i,j) ); VERIFY_IS_APPROX( ((ldm1 * (m1+m2))(i,j)) , ldm1.diagonal()(i) * (m1+m2)(i,j) ); VERIFY_IS_APPROX( ((m1 * rdm1)(i,j)) , rdm1.diagonal()(j) * m1(i,j) ); VERIFY_IS_APPROX( ((v1.asDiagonal() * m1)(i,j)) , v1(i) * m1(i,j) ); VERIFY_IS_APPROX( ((m1 * rv1.asDiagonal())(i,j)) , rv1(j) * m1(i,j) ); VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * m1)(i,j)) , (v1+v2)(i) * m1(i,j) ); 
VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * (m1+m2))(i,j)) , (v1+v2)(i) * (m1+m2)(i,j) ); VERIFY_IS_APPROX( ((m1 * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * m1(i,j) ); VERIFY_IS_APPROX( (((m1+m2) * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * (m1+m2)(i,j) ); if(rows>1) { DynMatrixType tmp = m1.topRows(rows/2), res; VERIFY_IS_APPROX( (res = m1.topRows(rows/2) * rv1.asDiagonal()), tmp * rv1.asDiagonal() ); VERIFY_IS_APPROX( (res = v1.head(rows/2).asDiagonal()*m1.topRows(rows/2)), v1.head(rows/2).asDiagonal()*tmp ); } BigMatrix big; big.setZero(2*rows, 2*cols); big.block(i,j,rows,cols) = m1; big.block(i,j,rows,cols) = v1.asDiagonal() * big.block(i,j,rows,cols); VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , v1.asDiagonal() * m1 ); big.block(i,j,rows,cols) = m1; big.block(i,j,rows,cols) = big.block(i,j,rows,cols) * rv1.asDiagonal(); VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , m1 * rv1.asDiagonal() ); // scalar multiple VERIFY_IS_APPROX(LeftDiagonalMatrix(ldm1*s1).diagonal(), ldm1.diagonal() * s1); VERIFY_IS_APPROX(LeftDiagonalMatrix(s1*ldm1).diagonal(), s1 * ldm1.diagonal()); VERIFY_IS_APPROX(m1 * (rdm1 * s1), (m1 * rdm1) * s1); VERIFY_IS_APPROX(m1 * (s1 * rdm1), (m1 * rdm1) * s1); // Diagonal to dense sq_m1.setRandom(); sq_m2 = sq_m1; VERIFY_IS_APPROX( (sq_m1 += (s1*v1).asDiagonal()), sq_m2 += (s1*v1).asDiagonal().toDenseMatrix() ); VERIFY_IS_APPROX( (sq_m1 -= (s1*v1).asDiagonal()), sq_m2 -= (s1*v1).asDiagonal().toDenseMatrix() ); VERIFY_IS_APPROX( (sq_m1 = (s1*v1).asDiagonal()), (s1*v1).asDiagonal().toDenseMatrix() ); sq_m1.setRandom(); sq_m2 = v1.asDiagonal(); sq_m2 = sq_m1 * sq_m2; VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).col(i), sq_m2.col(i) ); VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).row(i), sq_m2.row(i) ); sq_m1 = v1.asDiagonal(); sq_m2 = v2.asDiagonal(); SquareMatrixType sq_m3 = v1.asDiagonal(); VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() + v2.asDiagonal(), sq_m1 + sq_m2); VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - v2.asDiagonal(), sq_m1 - sq_m2); 
VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - 2*v2.asDiagonal() + v1.asDiagonal(), sq_m1 - 2*sq_m2 + sq_m1); } template void as_scalar_product(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; typedef Matrix DynMatrixType; typedef Matrix DynVectorType; typedef Matrix DynRowVectorType; Index rows = m.rows(); Index depth = internal::random(1,EIGEN_TEST_MAX_SIZE); VectorType v1 = VectorType::Random(rows); DynVectorType dv1 = DynVectorType::Random(depth); DynRowVectorType drv1 = DynRowVectorType::Random(depth); DynMatrixType dm1 = dv1; DynMatrixType drm1 = drv1; Scalar s = v1(0); VERIFY_IS_APPROX( v1.asDiagonal() * drv1, s*drv1 ); VERIFY_IS_APPROX( dv1 * v1.asDiagonal(), dv1*s ); VERIFY_IS_APPROX( v1.asDiagonal() * drm1, s*drm1 ); VERIFY_IS_APPROX( dm1 * v1.asDiagonal(), dm1*s ); } template void bug987() { Matrix3Xd points = Matrix3Xd::Random(3, 3); Vector2d diag = Vector2d::Random(); Matrix2Xd tmp1 = points.topRows<2>(), res1, res2; VERIFY_IS_APPROX( res1 = diag.asDiagonal() * points.topRows<2>(), res2 = diag.asDiagonal() * tmp1 ); Matrix2d tmp2 = points.topLeftCorner<2,2>(); VERIFY_IS_APPROX(( res1 = points.topLeftCorner<2,2>()*diag.asDiagonal()) , res2 = tmp2*diag.asDiagonal() ); } EIGEN_DECLARE_TEST(diagonalmatrices) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( diagonalmatrices(Matrix()) ); CALL_SUBTEST_1( as_scalar_product(Matrix()) ); CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) ); CALL_SUBTEST_3( diagonalmatrices(Matrix()) ); CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) ); CALL_SUBTEST_5( diagonalmatrices(Matrix()) ); CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( as_scalar_product(MatrixXcf(1,1)) ); CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( diagonalmatrices(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), 
internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_9( diagonalmatrices(MatrixXf(1,1)) ); CALL_SUBTEST_9( as_scalar_product(MatrixXf(1,1)) ); } CALL_SUBTEST_10( bug987<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/dontalign.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #if defined EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_2 || defined EIGEN_TEST_PART_3 || defined EIGEN_TEST_PART_4 #define EIGEN_DONT_ALIGN #elif defined EIGEN_TEST_PART_5 || defined EIGEN_TEST_PART_6 || defined EIGEN_TEST_PART_7 || defined EIGEN_TEST_PART_8 #define EIGEN_DONT_ALIGN_STATICALLY #endif #include "main.h" #include template void dontalign(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; typedef Matrix SquareMatrixType; Index rows = m.rows(); Index cols = m.cols(); MatrixType a = MatrixType::Random(rows,cols); SquareMatrixType square = SquareMatrixType::Random(rows,rows); VectorType v = VectorType::Random(rows); VERIFY_IS_APPROX(v, square * square.colPivHouseholderQr().solve(v)); square = square.inverse().eval(); a = square * a; square = square*square; v = square * v; v = a.adjoint() * v; VERIFY(square.determinant() != Scalar(0)); // bug 219: MapAligned() was giving an assert with EIGEN_DONT_ALIGN, because Map Flags were miscomputed Scalar* array = internal::aligned_new(rows); v = VectorType::MapAligned(array, rows); internal::aligned_delete(array, rows); } EIGEN_DECLARE_TEST(dontalign) { #if defined 
EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_5 dontalign(Matrix3d()); dontalign(Matrix4f()); #elif defined EIGEN_TEST_PART_2 || defined EIGEN_TEST_PART_6 dontalign(Matrix3cd()); dontalign(Matrix4cf()); #elif defined EIGEN_TEST_PART_3 || defined EIGEN_TEST_PART_7 dontalign(Matrix()); dontalign(Matrix, 32, 32>()); #elif defined EIGEN_TEST_PART_4 || defined EIGEN_TEST_PART_8 dontalign(MatrixXd(32, 32)); dontalign(MatrixXcf(32, 32)); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/dynalloc.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #if EIGEN_MAX_ALIGN_BYTES>0 #define ALIGNMENT EIGEN_MAX_ALIGN_BYTES #else #define ALIGNMENT 1 #endif typedef Matrix Vector16f; typedef Matrix Vector8f; void check_handmade_aligned_malloc() { for(int i = 1; i < 1000; i++) { char *p = (char*)internal::handmade_aligned_malloc(i); VERIFY(internal::UIntPtr(p)%ALIGNMENT==0); // if the buffer is wrongly allocated this will give a bad write --> check with valgrind for(int j = 0; j < i; j++) p[j]=0; internal::handmade_aligned_free(p); } } void check_aligned_malloc() { for(int i = ALIGNMENT; i < 1000; i++) { char *p = (char*)internal::aligned_malloc(i); VERIFY(internal::UIntPtr(p)%ALIGNMENT==0); // if the buffer is wrongly allocated this will give a bad write --> check with valgrind for(int j = 0; j < i; j++) p[j]=0; internal::aligned_free(p); } } void check_aligned_new() { for(int i = ALIGNMENT; i < 1000; i++) { float *p = internal::aligned_new(i); VERIFY(internal::UIntPtr(p)%ALIGNMENT==0); // if the buffer is wrongly allocated this will give a bad write --> check with 
valgrind for(int j = 0; j < i; j++) p[j]=0; internal::aligned_delete(p,i); } } void check_aligned_stack_alloc() { for(int i = ALIGNMENT; i < 400; i++) { ei_declare_aligned_stack_constructed_variable(float,p,i,0); VERIFY(internal::UIntPtr(p)%ALIGNMENT==0); // if the buffer is wrongly allocated this will give a bad write --> check with valgrind for(int j = 0; j < i; j++) p[j]=0; } } // test compilation with both a struct and a class... struct MyStruct { EIGEN_MAKE_ALIGNED_OPERATOR_NEW char dummychar; Vector16f avec; }; class MyClassA { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW char dummychar; Vector16f avec; }; template void check_dynaligned() { // TODO have to be updated once we support multiple alignment values if(T::SizeAtCompileTime % ALIGNMENT == 0) { T* obj = new T; VERIFY(T::NeedsToAlign==1); VERIFY(internal::UIntPtr(obj)%ALIGNMENT==0); delete obj; } } template void check_custom_new_delete() { { T* t = new T; delete t; } { std::size_t N = internal::random(1,10); T* t = new T[N]; delete[] t; } #if EIGEN_MAX_ALIGN_BYTES>0 && (!EIGEN_HAS_CXX17_OVERALIGN) { T* t = static_cast((T::operator new)(sizeof(T))); (T::operator delete)(t, sizeof(T)); } { T* t = static_cast((T::operator new)(sizeof(T))); (T::operator delete)(t); } #endif } EIGEN_DECLARE_TEST(dynalloc) { // low level dynamic memory allocation CALL_SUBTEST(check_handmade_aligned_malloc()); CALL_SUBTEST(check_aligned_malloc()); CALL_SUBTEST(check_aligned_new()); CALL_SUBTEST(check_aligned_stack_alloc()); for (int i=0; i() ); CALL_SUBTEST( check_custom_new_delete() ); CALL_SUBTEST( check_custom_new_delete() ); CALL_SUBTEST( check_custom_new_delete() ); } // check static allocation, who knows ? 
#if EIGEN_MAX_STATIC_ALIGN_BYTES for (int i=0; i() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); } { MyStruct foo0; VERIFY(internal::UIntPtr(foo0.avec.data())%ALIGNMENT==0); MyClassA fooA; VERIFY(internal::UIntPtr(fooA.avec.data())%ALIGNMENT==0); } // dynamic allocation, single object for (int i=0; iavec.data())%ALIGNMENT==0); MyClassA *fooA = new MyClassA(); VERIFY(internal::UIntPtr(fooA->avec.data())%ALIGNMENT==0); delete foo0; delete fooA; } // dynamic allocation, array const int N = 10; for (int i=0; iavec.data())%ALIGNMENT==0); MyClassA *fooA = new MyClassA[N]; VERIFY(internal::UIntPtr(fooA->avec.data())%ALIGNMENT==0); delete[] foo0; delete[] fooA; } #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/eigen2support.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#define EIGEN2_SUPPORT #include "main.h" template void eigen2support(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m3(rows, cols); Scalar s1 = internal::random(), s2 = internal::random(); // scalar addition VERIFY_IS_APPROX(m1.cwise() + s1, s1 + m1.cwise()); VERIFY_IS_APPROX(m1.cwise() + s1, MatrixType::Constant(rows,cols,s1) + m1); VERIFY_IS_APPROX((m1*Scalar(2)).cwise() - s2, (m1+m1) - MatrixType::Constant(rows,cols,s2) ); m3 = m1; m3.cwise() += s2; VERIFY_IS_APPROX(m3, m1.cwise() + s2); m3 = m1; m3.cwise() -= s1; VERIFY_IS_APPROX(m3, m1.cwise() - s1); VERIFY_IS_EQUAL((m1.corner(TopLeft,1,1)), (m1.block(0,0,1,1))); VERIFY_IS_EQUAL((m1.template corner<1,1>(TopLeft)), (m1.template block<1,1>(0,0))); VERIFY_IS_EQUAL((m1.col(0).start(1)), (m1.col(0).segment(0,1))); VERIFY_IS_EQUAL((m1.col(0).template start<1>()), (m1.col(0).segment(0,1))); VERIFY_IS_EQUAL((m1.col(0).end(1)), (m1.col(0).segment(rows-1,1))); VERIFY_IS_EQUAL((m1.col(0).template end<1>()), (m1.col(0).segment(rows-1,1))); using std::cos; using numext::real; using numext::abs2; VERIFY_IS_EQUAL(ei_cos(s1), cos(s1)); VERIFY_IS_EQUAL(ei_real(s1), real(s1)); VERIFY_IS_EQUAL(ei_abs2(s1), abs2(s1)); m1.minor(0,0); } EIGEN_DECLARE_TEST(eigen2support) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigen2support(Matrix()) ); CALL_SUBTEST_2( eigen2support(MatrixXd(1,1)) ); CALL_SUBTEST_4( eigen2support(Matrix3f()) ); CALL_SUBTEST_5( eigen2support(Matrix4d()) ); CALL_SUBTEST_2( eigen2support(MatrixXf(200,200)) ); CALL_SUBTEST_6( eigen2support(MatrixXcd(100,100)) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/eigensolver_complex.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include template bool find_pivot(typename MatrixType::Scalar tol, MatrixType &diffs, Index col=0) { bool match = diffs.diagonal().sum() <= tol; if(match || col==diffs.cols()) { return match; } else { Index n = diffs.cols(); std::vector > transpositions; for(Index i=col; i tol) break; best_index += col; diffs.row(col).swap(diffs.row(best_index)); if(find_pivot(tol,diffs,col+1)) return true; diffs.row(col).swap(diffs.row(best_index)); // move current pivot to the end diffs.row(n-(i-col)-1).swap(diffs.row(best_index)); transpositions.push_back(std::pair(n-(i-col)-1,best_index)); } // restore for(Index k=transpositions.size()-1; k>=0; --k) diffs.row(transpositions[k].first).swap(diffs.row(transpositions[k].second)); } return false; } /* Check that two column vectors are approximately equal up to permutations. * Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(), * however this strategy is numerically inacurate because of numerical cancellation issues. 
*/ template void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2) { typedef typename VectorType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; VERIFY(vec1.cols() == 1); VERIFY(vec2.cols() == 1); VERIFY(vec1.rows() == vec2.rows()); Index n = vec1.rows(); RealScalar tol = test_precision()*test_precision()*numext::maxi(vec1.squaredNorm(),vec2.squaredNorm()); Matrix diffs = (vec1.rowwise().replicate(n) - vec2.rowwise().replicate(n).transpose()).cwiseAbs2(); VERIFY( find_pivot(tol, diffs) ); } template void eigensolver(const MatrixType& m) { /* this test covers the following files: ComplexEigenSolver.h, and indirectly ComplexSchur.h */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; MatrixType a = MatrixType::Random(rows,cols); MatrixType symmA = a.adjoint() * a; ComplexEigenSolver ei0(symmA); VERIFY_IS_EQUAL(ei0.info(), Success); VERIFY_IS_APPROX(symmA * ei0.eigenvectors(), ei0.eigenvectors() * ei0.eigenvalues().asDiagonal()); ComplexEigenSolver ei1(a); VERIFY_IS_EQUAL(ei1.info(), Success); VERIFY_IS_APPROX(a * ei1.eigenvectors(), ei1.eigenvectors() * ei1.eigenvalues().asDiagonal()); // Note: If MatrixType is real then a.eigenvalues() uses EigenSolver and thus // another algorithm so results may differ slightly verify_is_approx_upto_permutation(a.eigenvalues(), ei1.eigenvalues()); ComplexEigenSolver ei2; ei2.setMaxIterations(ComplexSchur::m_maxIterationsPerRow * rows).compute(a); VERIFY_IS_EQUAL(ei2.info(), Success); VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors()); VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues()); if (rows > 2) { ei2.setMaxIterations(1).compute(a); VERIFY_IS_EQUAL(ei2.info(), NoConvergence); VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1); } ComplexEigenSolver eiNoEivecs(a, false); VERIFY_IS_EQUAL(eiNoEivecs.info(), Success); VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues()); // Regression test for 
issue #66 MatrixType z = MatrixType::Zero(rows,cols); ComplexEigenSolver eiz(z); VERIFY((eiz.eigenvalues().cwiseEqual(0)).all()); MatrixType id = MatrixType::Identity(rows, cols); VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1)); if (rows > 1 && rows < 20) { // Test matrix with NaN a(0,0) = std::numeric_limits::quiet_NaN(); ComplexEigenSolver eiNaN(a); VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence); } // regression test for bug 1098 { ComplexEigenSolver eig(a.adjoint() * a); eig.compute(a.adjoint() * a); } // regression test for bug 478 { a.setZero(); ComplexEigenSolver ei3(a); VERIFY_IS_EQUAL(ei3.info(), Success); VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1)); VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity()); } } template void eigensolver_verify_assert(const MatrixType& m) { ComplexEigenSolver eig; VERIFY_RAISES_ASSERT(eig.eigenvectors()); VERIFY_RAISES_ASSERT(eig.eigenvalues()); MatrixType a = MatrixType::Random(m.rows(),m.cols()); eig.compute(a, false); VERIFY_RAISES_ASSERT(eig.eigenvectors()); } EIGEN_DECLARE_TEST(eigensolver_complex) { int s = 0; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigensolver(Matrix4cf()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_2( eigensolver(MatrixXcd(s,s)) ); CALL_SUBTEST_3( eigensolver(Matrix, 1, 1>()) ); CALL_SUBTEST_4( eigensolver(Matrix3f()) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(s,s)) ); CALL_SUBTEST_3( eigensolver_verify_assert(Matrix, 1, 1>()) ); CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) ); // Test problem size constructors CALL_SUBTEST_5(ComplexEigenSolver tmp(s)); TEST_SET_BUT_UNUSED_VARIABLE(s) } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/eigensolver_generalized_real.cpp 
================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012-2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_RUNTIME_NO_MALLOC #include "main.h" #include #include #include template void generalized_eigensolver_real(const MatrixType& m) { /* this test covers the following files: GeneralizedEigenSolver.h */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef std::complex ComplexScalar; typedef Matrix VectorType; MatrixType a = MatrixType::Random(rows,cols); MatrixType b = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols); MatrixType b1 = MatrixType::Random(rows,cols); MatrixType spdA = a.adjoint() * a + a1.adjoint() * a1; MatrixType spdB = b.adjoint() * b + b1.adjoint() * b1; // lets compare to GeneralizedSelfAdjointEigenSolver { GeneralizedSelfAdjointEigenSolver symmEig(spdA, spdB); GeneralizedEigenSolver eig(spdA, spdB); VERIFY_IS_EQUAL(eig.eigenvalues().imag().cwiseAbs().maxCoeff(), 0); VectorType realEigenvalues = eig.eigenvalues().real(); std::sort(realEigenvalues.data(), realEigenvalues.data()+realEigenvalues.size()); VERIFY_IS_APPROX(realEigenvalues, symmEig.eigenvalues()); // check eigenvectors typename GeneralizedEigenSolver::EigenvectorsType D = eig.eigenvalues().asDiagonal(); typename GeneralizedEigenSolver::EigenvectorsType V = eig.eigenvectors(); VERIFY_IS_APPROX(spdA*V, spdB*V*D); } // non symmetric case: { GeneralizedEigenSolver eig(rows); // TODO enable full-prealocation of required memory, this probably requires an in-place mode for HessenbergDecomposition //Eigen::internal::set_is_malloc_allowed(false); eig.compute(a,b); //Eigen::internal::set_is_malloc_allowed(true); for(Index k=0; k tmp = 
(eig.betas()(k)*a).template cast() - eig.alphas()(k)*b; if(tmp.size()>1 && tmp.norm()>(std::numeric_limits::min)()) tmp /= tmp.norm(); VERIFY_IS_MUCH_SMALLER_THAN( std::abs(tmp.determinant()), Scalar(1) ); } // check eigenvectors typename GeneralizedEigenSolver::EigenvectorsType D = eig.eigenvalues().asDiagonal(); typename GeneralizedEigenSolver::EigenvectorsType V = eig.eigenvectors(); VERIFY_IS_APPROX(a*V, b*V*D); } // regression test for bug 1098 { GeneralizedSelfAdjointEigenSolver eig1(a.adjoint() * a,b.adjoint() * b); eig1.compute(a.adjoint() * a,b.adjoint() * b); GeneralizedEigenSolver eig2(a.adjoint() * a,b.adjoint() * b); eig2.compute(a.adjoint() * a,b.adjoint() * b); } // check without eigenvectors { GeneralizedEigenSolver eig1(spdA, spdB, true); GeneralizedEigenSolver eig2(spdA, spdB, false); VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues()); } } EIGEN_DECLARE_TEST(eigensolver_generalized_real) { for(int i = 0; i < g_repeat; i++) { int s = 0; CALL_SUBTEST_1( generalized_eigensolver_real(Matrix4f()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(s,s)) ); // some trivial but implementation-wise special cases CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(1,1)) ); CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(2,2)) ); CALL_SUBTEST_3( generalized_eigensolver_real(Matrix()) ); CALL_SUBTEST_4( generalized_eigensolver_real(Matrix2d()) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/eigensolver_generic.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010,2012 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include template void check_eigensolver_for_given_mat(const EigType &eig, const MatType& a) { typedef typename NumTraits::Real RealScalar; typedef Matrix RealVectorType; typedef typename std::complex Complex; Index n = a.rows(); VERIFY_IS_EQUAL(eig.info(), Success); VERIFY_IS_APPROX(a * eig.pseudoEigenvectors(), eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()); VERIFY_IS_APPROX(a.template cast() * eig.eigenvectors(), eig.eigenvectors() * eig.eigenvalues().asDiagonal()); VERIFY_IS_APPROX(eig.eigenvectors().colwise().norm(), RealVectorType::Ones(n).transpose()); VERIFY_IS_APPROX(a.eigenvalues(), eig.eigenvalues()); } template void eigensolver(const MatrixType& m) { /* this test covers the following files: EigenSolver.h */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename std::complex Complex; MatrixType a = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols); MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1; EigenSolver ei0(symmA); VERIFY_IS_EQUAL(ei0.info(), Success); VERIFY_IS_APPROX(symmA * ei0.pseudoEigenvectors(), ei0.pseudoEigenvectors() * ei0.pseudoEigenvalueMatrix()); VERIFY_IS_APPROX((symmA.template cast()) * (ei0.pseudoEigenvectors().template cast()), (ei0.pseudoEigenvectors().template cast()) * (ei0.eigenvalues().asDiagonal())); EigenSolver ei1(a); CALL_SUBTEST( check_eigensolver_for_given_mat(ei1,a) ); EigenSolver ei2; ei2.setMaxIterations(RealSchur::m_maxIterationsPerRow * rows).compute(a); VERIFY_IS_EQUAL(ei2.info(), Success); VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors()); VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues()); if (rows > 2) { ei2.setMaxIterations(1).compute(a); VERIFY_IS_EQUAL(ei2.info(), NoConvergence); VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1); } 
EigenSolver eiNoEivecs(a, false); VERIFY_IS_EQUAL(eiNoEivecs.info(), Success); VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues()); VERIFY_IS_APPROX(ei1.pseudoEigenvalueMatrix(), eiNoEivecs.pseudoEigenvalueMatrix()); MatrixType id = MatrixType::Identity(rows, cols); VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1)); if (rows > 2 && rows < 20) { // Test matrix with NaN a(0,0) = std::numeric_limits::quiet_NaN(); EigenSolver eiNaN(a); VERIFY_IS_NOT_EQUAL(eiNaN.info(), Success); } // regression test for bug 1098 { EigenSolver eig(a.adjoint() * a); eig.compute(a.adjoint() * a); } // regression test for bug 478 { a.setZero(); EigenSolver ei3(a); VERIFY_IS_EQUAL(ei3.info(), Success); VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1)); VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity()); } } template void eigensolver_verify_assert(const MatrixType& m) { EigenSolver eig; VERIFY_RAISES_ASSERT(eig.eigenvectors()); VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors()); VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix()); VERIFY_RAISES_ASSERT(eig.eigenvalues()); MatrixType a = MatrixType::Random(m.rows(),m.cols()); eig.compute(a, false); VERIFY_RAISES_ASSERT(eig.eigenvectors()); VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors()); } template Matrix make_companion(const CoeffType& coeffs) { Index n = coeffs.size()-1; Matrix res(n,n); res.setZero(); res.row(0) = -coeffs.tail(n) / coeffs(0); res.diagonal(-1).setOnes(); return res; } template void eigensolver_generic_extra() { { // regression test for bug 793 MatrixXd a(3,3); a << 0, 0, 1, 1, 1, 1, 1, 1e+200, 1; Eigen::EigenSolver eig(a); double scale = 1e-200; // scale to avoid overflow during the comparisons VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale); VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale); } { // check a case where all eigenvalues are 
null. MatrixXd a(2,2); a << 1, 1, -1, -1; Eigen::EigenSolver eig(a); VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.); VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.); VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.); VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.); VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.); } // regression test for bug 933 { { VectorXd coeffs(5); coeffs << 1, -3, -175, -225, 2250; MatrixXd C = make_companion(coeffs); EigenSolver eig(C); CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) ); } { // this test is tricky because it requires high accuracy in smallest eigenvalues VectorXd coeffs(5); coeffs << 6.154671e-15, -1.003870e-10, -9.819570e-01, 3.995715e+03, 2.211511e+08; MatrixXd C = make_companion(coeffs); EigenSolver eig(C); CALL_SUBTEST( check_eigensolver_for_given_mat(eig,C) ); Index n = C.rows(); for(Index i=0;i Complex; MatrixXcd ac = C.cast(); ac.diagonal().array() -= eig.eigenvalues()(i); VectorXd sv = ac.jacobiSvd().singularValues(); // comparing to sv(0) is not enough here to catch the "bug", // the hard-coded 1.0 is important! VERIFY_IS_MUCH_SMALLER_THAN(sv(n-1), 1.0); } } } // regression test for bug 1557 { // this test is interesting because it contains zeros on the diagonal. 
MatrixXd A_bug1557(3,3); A_bug1557 << 0, 0, 0, 1, 0, 0.5887907064808635127, 0, 1, 0; EigenSolver eig(A_bug1557); CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1557) ); } // regression test for bug 1174 { Index n = 12; MatrixXf A_bug1174(n,n); A_bug1174 << 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, 262144, 0, 0, 262144, 786432, 0, 0, 0, 0, 0, 0, 786432, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0, 0, 262144, 262144, 0, 0, 262144, 262144, 262144, 262144, 262144, 262144, 0; EigenSolver eig(A_bug1174); CALL_SUBTEST( check_eigensolver_for_given_mat(eig,A_bug1174) ); } } EIGEN_DECLARE_TEST(eigensolver_generic) { int s = 0; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigensolver(Matrix4f()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_2( eigensolver(MatrixXd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) // some trivial but implementation-wise tricky cases CALL_SUBTEST_2( eigensolver(MatrixXd(1,1)) ); CALL_SUBTEST_2( eigensolver(MatrixXd(2,2)) ); CALL_SUBTEST_3( eigensolver(Matrix()) ); CALL_SUBTEST_4( eigensolver(Matrix2d()) ); } CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(s,s)) ); CALL_SUBTEST_3( eigensolver_verify_assert(Matrix()) ); CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) ); // Test problem size constructors 
CALL_SUBTEST_5(EigenSolver tmp(s)); // regression test for bug 410 CALL_SUBTEST_2( { MatrixXd A(1,1); A(0,0) = std::sqrt(-1.); // is Not-a-Number Eigen::EigenSolver solver(A); VERIFY_IS_EQUAL(solver.info(), NumericalIssue); } ); CALL_SUBTEST_2( eigensolver_generic_extra<0>() ); TEST_SET_BUT_UNUSED_VARIABLE(s) } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/eigensolver_selfadjoint.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include "svd_fill.h" #include #include #include template void selfadjointeigensolver_essential_check(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; RealScalar eival_eps = numext::mini(test_precision(), NumTraits::dummy_precision()*20000); SelfAdjointEigenSolver eiSymm(m); VERIFY_IS_EQUAL(eiSymm.info(), Success); RealScalar scaling = m.cwiseAbs().maxCoeff(); if(scaling<(std::numeric_limits::min)()) { VERIFY(eiSymm.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits::min)()); } else { VERIFY_IS_APPROX((m.template selfadjointView() * eiSymm.eigenvectors())/scaling, (eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal())/scaling); } VERIFY_IS_APPROX(m.template selfadjointView().eigenvalues(), eiSymm.eigenvalues()); VERIFY_IS_UNITARY(eiSymm.eigenvectors()); if(m.cols()<=4) { SelfAdjointEigenSolver eiDirect; eiDirect.computeDirect(m); VERIFY_IS_EQUAL(eiDirect.info(), Success); if(! 
eiSymm.eigenvalues().isApprox(eiDirect.eigenvalues(), eival_eps) ) { std::cerr << "reference eigenvalues: " << eiSymm.eigenvalues().transpose() << "\n" << "obtained eigenvalues: " << eiDirect.eigenvalues().transpose() << "\n" << "diff: " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).transpose() << "\n" << "error (eps): " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).norm() / eiSymm.eigenvalues().norm() << " (" << eival_eps << ")\n"; } if(scaling<(std::numeric_limits::min)()) { VERIFY(eiDirect.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits::min)()); } else { VERIFY_IS_APPROX(eiSymm.eigenvalues()/scaling, eiDirect.eigenvalues()/scaling); VERIFY_IS_APPROX((m.template selfadjointView() * eiDirect.eigenvectors())/scaling, (eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal())/scaling); VERIFY_IS_APPROX(m.template selfadjointView().eigenvalues()/scaling, eiDirect.eigenvalues()/scaling); } VERIFY_IS_UNITARY(eiDirect.eigenvectors()); } } template void selfadjointeigensolver(const MatrixType& m) { /* this test covers the following files: EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h) */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; RealScalar largerEps = 10*test_precision(); MatrixType a = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols); MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1; MatrixType symmC = symmA; svd_fill_random(symmA,Symmetric); symmA.template triangularView().setZero(); symmC.template triangularView().setZero(); MatrixType b = MatrixType::Random(rows,cols); MatrixType b1 = MatrixType::Random(rows,cols); MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1; symmB.template triangularView().setZero(); CALL_SUBTEST( selfadjointeigensolver_essential_check(symmA) ); SelfAdjointEigenSolver eiSymm(symmA); // generalized eigen pb GeneralizedSelfAdjointEigenSolver eiSymmGen(symmC, 
symmB); SelfAdjointEigenSolver eiSymmNoEivecs(symmA, false); VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success); VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues()); // generalized eigen problem Ax = lBx eiSymmGen.compute(symmC, symmB,Ax_lBx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY((symmC.template selfadjointView() * eiSymmGen.eigenvectors()).isApprox( symmB.template selfadjointView() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); // generalized eigen problem BAx = lx eiSymmGen.compute(symmC, symmB,BAx_lx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY((symmB.template selfadjointView() * (symmC.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); // generalized eigen problem ABx = lx eiSymmGen.compute(symmC, symmB,ABx_lx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY((symmC.template selfadjointView() * (symmB.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); eiSymm.compute(symmC); MatrixType sqrtSymmA = eiSymm.operatorSqrt(); VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), sqrtSymmA*sqrtSymmA); VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView()*eiSymm.operatorInverseSqrt()); MatrixType id = MatrixType::Identity(rows, cols); VERIFY_IS_APPROX(id.template selfadjointView().operatorNorm(), RealScalar(1)); SelfAdjointEigenSolver eiSymmUninitialized; VERIFY_RAISES_ASSERT(eiSymmUninitialized.info()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); eiSymmUninitialized.compute(symmA, false); VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt()); 
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); // test Tridiagonalization's methods Tridiagonalization tridiag(symmC); VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal()); VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>()); Matrix T = tridiag.matrixT(); if(rows>1 && cols>1) { // FIXME check that upper and lower part are 0: //VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView().isZero()); } VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal()); VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>()); VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint()); VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT() * tridiag.matrixQ().adjoint()); // Test computation of eigenvalues from tridiagonal matrix if(rows > 1) { SelfAdjointEigenSolver eiSymmTridiag; eiSymmTridiag.computeFromTridiagonal(tridiag.matrixT().diagonal(), tridiag.matrixT().diagonal(-1), ComputeEigenvectors); VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmTridiag.eigenvalues()); VERIFY_IS_APPROX(tridiag.matrixT(), eiSymmTridiag.eigenvectors().real() * eiSymmTridiag.eigenvalues().asDiagonal() * eiSymmTridiag.eigenvectors().real().transpose()); } if (rows > 1 && rows < 20) { // Test matrix with NaN symmC(0,0) = std::numeric_limits::quiet_NaN(); SelfAdjointEigenSolver eiSymmNaN(symmC); VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence); } // regression test for bug 1098 { SelfAdjointEigenSolver eig(a.adjoint() * a); eig.compute(a.adjoint() * a); } // regression test for bug 478 { a.setZero(); SelfAdjointEigenSolver ei3(a); VERIFY_IS_EQUAL(ei3.info(), Success); VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1)); VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity()); } } template void bug_854() { Matrix3d m; m << 850.961, 51.966, 0, 
51.966, 254.841, 0, 0, 0, 0; selfadjointeigensolver_essential_check(m); } template void bug_1014() { Matrix3d m; m << 0.11111111111111114658, 0, 0, 0, 0.11111111111111109107, 0, 0, 0, 0.11111111111111107719; selfadjointeigensolver_essential_check(m); } template void bug_1225() { Matrix3d m1, m2; m1.setRandom(); m1 = m1*m1.transpose(); m2 = m1.triangularView(); SelfAdjointEigenSolver eig1(m1); SelfAdjointEigenSolver eig2(m2.selfadjointView()); VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues()); } template void bug_1204() { SparseMatrix A(2,2); A.setIdentity(); SelfAdjointEigenSolver > eig(A); } EIGEN_DECLARE_TEST(eigensolver_selfadjoint) { int s = 0; for(int i = 0; i < g_repeat; i++) { // trivial test for 1x1 matrices: CALL_SUBTEST_1( selfadjointeigensolver(Matrix())); CALL_SUBTEST_1( selfadjointeigensolver(Matrix())); CALL_SUBTEST_1( selfadjointeigensolver(Matrix, 1, 1>())); // very important to test 3x3 and 2x2 matrices since we provide special paths for them CALL_SUBTEST_12( selfadjointeigensolver(Matrix2f()) ); CALL_SUBTEST_12( selfadjointeigensolver(Matrix2d()) ); CALL_SUBTEST_12( selfadjointeigensolver(Matrix2cd()) ); CALL_SUBTEST_13( selfadjointeigensolver(Matrix3f()) ); CALL_SUBTEST_13( selfadjointeigensolver(Matrix3d()) ); CALL_SUBTEST_13( selfadjointeigensolver(Matrix3cd()) ); CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); CALL_SUBTEST_2( selfadjointeigensolver(Matrix4cd()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(s,s)) ); CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(s,s)) ); CALL_SUBTEST_9( selfadjointeigensolver(Matrix,Dynamic,Dynamic,RowMajor>(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) // some trivial but implementation-wise tricky cases CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(1,1)) ); CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(2,2)) ); CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(1,1)) ); CALL_SUBTEST_5( 
selfadjointeigensolver(MatrixXcd(2,2)) ); CALL_SUBTEST_6( selfadjointeigensolver(Matrix()) ); CALL_SUBTEST_7( selfadjointeigensolver(Matrix()) ); } CALL_SUBTEST_13( bug_854<0>() ); CALL_SUBTEST_13( bug_1014<0>() ); CALL_SUBTEST_13( bug_1204<0>() ); CALL_SUBTEST_13( bug_1225<0>() ); // Test problem size constructors s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_8(SelfAdjointEigenSolver tmp1(s)); CALL_SUBTEST_8(Tridiagonalization tmp2(s)); TEST_SET_BUT_UNUSED_VARIABLE(s) } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/evaluator_common.h ================================================ ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/evaluators.cpp ================================================ #include "main.h" namespace Eigen { template const Product prod(const Lhs& lhs, const Rhs& rhs) { return Product(lhs,rhs); } template const Product lazyprod(const Lhs& lhs, const Rhs& rhs) { return Product(lhs,rhs); } template EIGEN_STRONG_INLINE DstXprType& copy_using_evaluator(const EigenBase &dst, const SrcXprType &src) { call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op()); return dst.const_cast_derived(); } template class StorageBase, typename SrcXprType> EIGEN_STRONG_INLINE const DstXprType& copy_using_evaluator(const NoAlias& dst, const SrcXprType &src) { call_assignment(dst, src.derived(), internal::assign_op()); return dst.expression(); } template EIGEN_STRONG_INLINE DstXprType& copy_using_evaluator(const PlainObjectBase &dst, const SrcXprType &src) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING eigen_assert((dst.size()==0 || (IsVectorAtCompileTime ? (dst.size() == src.size()) : (dst.rows() == src.rows() && dst.cols() == src.cols()))) && "Size mismatch. 
Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); #else dst.const_cast_derived().resizeLike(src.derived()); #endif call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op()); return dst.const_cast_derived(); } template void add_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src) { typedef typename DstXprType::Scalar Scalar; call_assignment(const_cast(dst), src.derived(), internal::add_assign_op()); } template void subtract_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src) { typedef typename DstXprType::Scalar Scalar; call_assignment(const_cast(dst), src.derived(), internal::sub_assign_op()); } template void multiply_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src) { typedef typename DstXprType::Scalar Scalar; call_assignment(dst.const_cast_derived(), src.derived(), internal::mul_assign_op()); } template void divide_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src) { typedef typename DstXprType::Scalar Scalar; call_assignment(dst.const_cast_derived(), src.derived(), internal::div_assign_op()); } template void swap_using_evaluator(const DstXprType& dst, const SrcXprType& src) { typedef typename DstXprType::Scalar Scalar; call_assignment(dst.const_cast_derived(), src.const_cast_derived(), internal::swap_assign_op()); } namespace internal { template class StorageBase, typename Src, typename Func> EIGEN_DEVICE_FUNC void call_assignment(const NoAlias& dst, const Src& src, const Func& func) { call_assignment_no_alias(dst.expression(), src, func); } template class StorageBase, typename Src, typename Func> EIGEN_DEVICE_FUNC void call_restricted_packet_assignment(const NoAlias& dst, const Src& src, const Func& func) { call_restricted_packet_assignment_no_alias(dst.expression(), src, func); } } } template long get_cost(const XprType& ) { return Eigen::internal::evaluator::CoeffReadCost; } using namespace std; #define 
VERIFY_IS_APPROX_EVALUATOR(DEST,EXPR) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (EXPR).eval()); #define VERIFY_IS_APPROX_EVALUATOR2(DEST,EXPR,REF) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (REF).eval()); EIGEN_DECLARE_TEST(evaluators) { // Testing Matrix evaluator and Transpose Vector2d v = Vector2d::Random(); const Vector2d v_const(v); Vector2d v2; RowVector2d w; VERIFY_IS_APPROX_EVALUATOR(v2, v); VERIFY_IS_APPROX_EVALUATOR(v2, v_const); // Testing Transpose VERIFY_IS_APPROX_EVALUATOR(w, v.transpose()); // Transpose as rvalue VERIFY_IS_APPROX_EVALUATOR(w, v_const.transpose()); copy_using_evaluator(w.transpose(), v); // Transpose as lvalue VERIFY_IS_APPROX(w,v.transpose().eval()); copy_using_evaluator(w.transpose(), v_const); VERIFY_IS_APPROX(w,v_const.transpose().eval()); // Testing Array evaluator { ArrayXXf a(2,3); ArrayXXf b(3,2); a << 1,2,3, 4,5,6; const ArrayXXf a_const(a); VERIFY_IS_APPROX_EVALUATOR(b, a.transpose()); VERIFY_IS_APPROX_EVALUATOR(b, a_const.transpose()); // Testing CwiseNullaryOp evaluator copy_using_evaluator(w, RowVector2d::Random()); VERIFY((w.array() >= -1).all() && (w.array() <= 1).all()); // not easy to test ... 
VERIFY_IS_APPROX_EVALUATOR(w, RowVector2d::Zero()); VERIFY_IS_APPROX_EVALUATOR(w, RowVector2d::Constant(3)); // mix CwiseNullaryOp and transpose VERIFY_IS_APPROX_EVALUATOR(w, Vector2d::Zero().transpose()); } { // test product expressions int s = internal::random(1,100); MatrixXf a(s,s), b(s,s), c(s,s), d(s,s); a.setRandom(); b.setRandom(); c.setRandom(); d.setRandom(); VERIFY_IS_APPROX_EVALUATOR(d, (a + b)); VERIFY_IS_APPROX_EVALUATOR(d, (a + b).transpose()); VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b), a*b); VERIFY_IS_APPROX_EVALUATOR2(d.noalias(), prod(a,b), a*b); VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b) + c, a*b + c); VERIFY_IS_APPROX_EVALUATOR2(d, s * prod(a,b), s * a*b); VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b).transpose(), (a*b).transpose()); VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b) + prod(b,c), a*b + b*c); // check that prod works even with aliasing present c = a*a; copy_using_evaluator(a, prod(a,a)); VERIFY_IS_APPROX(a,c); // check compound assignment of products d = c; add_assign_using_evaluator(c.noalias(), prod(a,b)); d.noalias() += a*b; VERIFY_IS_APPROX(c, d); d = c; subtract_assign_using_evaluator(c.noalias(), prod(a,b)); d.noalias() -= a*b; VERIFY_IS_APPROX(c, d); } { // test product with all possible sizes int s = internal::random(1,100); Matrix m11, res11; m11.setRandom(1,1); Matrix m14, res14; m14.setRandom(1,4); Matrix m1X, res1X; m1X.setRandom(1,s); Matrix m41, res41; m41.setRandom(4,1); Matrix m44, res44; m44.setRandom(4,4); Matrix m4X, res4X; m4X.setRandom(4,s); Matrix mX1, resX1; mX1.setRandom(s,1); Matrix mX4, resX4; mX4.setRandom(s,4); Matrix mXX, resXX; mXX.setRandom(s,s); VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m11,m11), m11*m11); VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m14,m41), m14*m41); VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m1X,mX1), m1X*mX1); VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m11,m14), m11*m14); VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m14,m44), m14*m44); VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m1X,mX4), m1X*mX4); 
VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m11,m1X), m11*m1X); VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m14,m4X), m14*m4X); VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m1X,mXX), m1X*mXX); VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m41,m11), m41*m11); VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m44,m41), m44*m41); VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m4X,mX1), m4X*mX1); VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m41,m14), m41*m14); VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m44,m44), m44*m44); VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m4X,mX4), m4X*mX4); VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m41,m1X), m41*m1X); VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m44,m4X), m44*m4X); VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m4X,mXX), m4X*mXX); VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mX1,m11), mX1*m11); VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mX4,m41), mX4*m41); VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mXX,mX1), mXX*mX1); VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mX1,m14), mX1*m14); VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mX4,m44), mX4*m44); VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mXX,mX4), mXX*mX4); VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mX1,m1X), mX1*m1X); VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mX4,m4X), mX4*m4X); VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mXX,mXX), mXX*mXX); } { ArrayXXf a(2,3); ArrayXXf b(3,2); a << 1,2,3, 4,5,6; const ArrayXXf a_const(a); // this does not work because Random is eval-before-nested: // copy_using_evaluator(w, Vector2d::Random().transpose()); // test CwiseUnaryOp VERIFY_IS_APPROX_EVALUATOR(v2, 3 * v); VERIFY_IS_APPROX_EVALUATOR(w, (3 * v).transpose()); VERIFY_IS_APPROX_EVALUATOR(b, (a + 3).transpose()); VERIFY_IS_APPROX_EVALUATOR(b, (2 * a_const + 3).transpose()); // test CwiseBinaryOp VERIFY_IS_APPROX_EVALUATOR(v2, v + Vector2d::Ones()); VERIFY_IS_APPROX_EVALUATOR(w, (v + Vector2d::Ones()).transpose().cwiseProduct(RowVector2d::Constant(3))); // dynamic matrices and arrays MatrixXd mat1(6,6), mat2(6,6); VERIFY_IS_APPROX_EVALUATOR(mat1, MatrixXd::Identity(6,6)); 
VERIFY_IS_APPROX_EVALUATOR(mat2, mat1); copy_using_evaluator(mat2.transpose(), mat1); VERIFY_IS_APPROX(mat2.transpose(), mat1); ArrayXXd arr1(6,6), arr2(6,6); VERIFY_IS_APPROX_EVALUATOR(arr1, ArrayXXd::Constant(6,6, 3.0)); VERIFY_IS_APPROX_EVALUATOR(arr2, arr1); // test automatic resizing mat2.resize(3,3); VERIFY_IS_APPROX_EVALUATOR(mat2, mat1); arr2.resize(9,9); VERIFY_IS_APPROX_EVALUATOR(arr2, arr1); // test direct traversal Matrix3f m3; Array33f a3; VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Identity()); // matrix, nullary // TODO: find a way to test direct traversal with array VERIFY_IS_APPROX_EVALUATOR(m3.transpose(), Matrix3f::Identity().transpose()); // transpose VERIFY_IS_APPROX_EVALUATOR(m3, 2 * Matrix3f::Identity()); // unary VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Identity() + Matrix3f::Zero()); // binary VERIFY_IS_APPROX_EVALUATOR(m3.block(0,0,2,2), Matrix3f::Identity().block(1,1,2,2)); // block // test linear traversal VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Zero()); // matrix, nullary VERIFY_IS_APPROX_EVALUATOR(a3, Array33f::Zero()); // array VERIFY_IS_APPROX_EVALUATOR(m3.transpose(), Matrix3f::Zero().transpose()); // transpose VERIFY_IS_APPROX_EVALUATOR(m3, 2 * Matrix3f::Zero()); // unary VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Zero() + m3); // binary // test inner vectorization Matrix4f m4, m4src = Matrix4f::Random(); Array44f a4, a4src = Matrix4f::Random(); VERIFY_IS_APPROX_EVALUATOR(m4, m4src); // matrix VERIFY_IS_APPROX_EVALUATOR(a4, a4src); // array VERIFY_IS_APPROX_EVALUATOR(m4.transpose(), m4src.transpose()); // transpose // TODO: find out why Matrix4f::Zero() does not allow inner vectorization VERIFY_IS_APPROX_EVALUATOR(m4, 2 * m4src); // unary VERIFY_IS_APPROX_EVALUATOR(m4, m4src + m4src); // binary // test linear vectorization MatrixXf mX(6,6), mXsrc = MatrixXf::Random(6,6); ArrayXXf aX(6,6), aXsrc = ArrayXXf::Random(6,6); VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc); // matrix VERIFY_IS_APPROX_EVALUATOR(aX, aXsrc); // array 
VERIFY_IS_APPROX_EVALUATOR(mX.transpose(), mXsrc.transpose()); // transpose VERIFY_IS_APPROX_EVALUATOR(mX, MatrixXf::Zero(6,6)); // nullary VERIFY_IS_APPROX_EVALUATOR(mX, 2 * mXsrc); // unary VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc + mXsrc); // binary // test blocks and slice vectorization VERIFY_IS_APPROX_EVALUATOR(m4, (mXsrc.block<4,4>(1,0))); VERIFY_IS_APPROX_EVALUATOR(aX, ArrayXXf::Constant(10, 10, 3.0).block(2, 3, 6, 6)); Matrix4f m4ref = m4; copy_using_evaluator(m4.block(1, 1, 2, 3), m3.bottomRows(2)); m4ref.block(1, 1, 2, 3) = m3.bottomRows(2); VERIFY_IS_APPROX(m4, m4ref); mX.setIdentity(20,20); MatrixXf mXref = MatrixXf::Identity(20,20); mXsrc = MatrixXf::Random(9,12); copy_using_evaluator(mX.block(4, 4, 9, 12), mXsrc); mXref.block(4, 4, 9, 12) = mXsrc; VERIFY_IS_APPROX(mX, mXref); // test Map const float raw[3] = {1,2,3}; float buffer[3] = {0,0,0}; Vector3f v3; Array3f a3f; VERIFY_IS_APPROX_EVALUATOR(v3, Map(raw)); VERIFY_IS_APPROX_EVALUATOR(a3f, Map(raw)); Vector3f::Map(buffer) = 2*v3; VERIFY(buffer[0] == 2); VERIFY(buffer[1] == 4); VERIFY(buffer[2] == 6); // test CwiseUnaryView mat1.setRandom(); mat2.setIdentity(); MatrixXcd matXcd(6,6), matXcd_ref(6,6); copy_using_evaluator(matXcd.real(), mat1); copy_using_evaluator(matXcd.imag(), mat2); matXcd_ref.real() = mat1; matXcd_ref.imag() = mat2; VERIFY_IS_APPROX(matXcd, matXcd_ref); // test Select VERIFY_IS_APPROX_EVALUATOR(aX, (aXsrc > 0).select(aXsrc, -aXsrc)); // test Replicate mXsrc = MatrixXf::Random(6, 6); VectorXf vX = VectorXf::Random(6); mX.resize(6, 6); VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc.colwise() + vX); matXcd.resize(12, 12); VERIFY_IS_APPROX_EVALUATOR(matXcd, matXcd_ref.replicate(2,2)); VERIFY_IS_APPROX_EVALUATOR(matXcd, (matXcd_ref.replicate<2,2>())); // test partial reductions VectorXd vec1(6); VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.rowwise().sum()); VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.colwise().sum().transpose()); // test MatrixWrapper and ArrayWrapper mat1.setRandom(6,6); arr1.setRandom(6,6); 
VERIFY_IS_APPROX_EVALUATOR(mat2, arr1.matrix()); VERIFY_IS_APPROX_EVALUATOR(arr2, mat1.array()); VERIFY_IS_APPROX_EVALUATOR(mat2, (arr1 + 2).matrix()); VERIFY_IS_APPROX_EVALUATOR(arr2, mat1.array() + 2); mat2.array() = arr1 * arr1; VERIFY_IS_APPROX(mat2, (arr1 * arr1).matrix()); arr2.matrix() = MatrixXd::Identity(6,6); VERIFY_IS_APPROX(arr2, MatrixXd::Identity(6,6).array()); // test Reverse VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.reverse()); VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.colwise().reverse()); VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.rowwise().reverse()); arr2.reverse() = arr1; VERIFY_IS_APPROX(arr2, arr1.reverse()); mat2.array() = mat1.array().reverse(); VERIFY_IS_APPROX(mat2.array(), mat1.array().reverse()); // test Diagonal VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal()); vec1.resize(5); VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal(1)); VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal<-1>()); vec1.setRandom(); mat2 = mat1; copy_using_evaluator(mat1.diagonal(1), vec1); mat2.diagonal(1) = vec1; VERIFY_IS_APPROX(mat1, mat2); copy_using_evaluator(mat1.diagonal<-1>(), mat1.diagonal(1)); mat2.diagonal<-1>() = mat2.diagonal(1); VERIFY_IS_APPROX(mat1, mat2); } { // test swapping MatrixXd mat1, mat2, mat1ref, mat2ref; mat1ref = mat1 = MatrixXd::Random(6, 6); mat2ref = mat2 = 2 * mat1 + MatrixXd::Identity(6, 6); swap_using_evaluator(mat1, mat2); mat1ref.swap(mat2ref); VERIFY_IS_APPROX(mat1, mat1ref); VERIFY_IS_APPROX(mat2, mat2ref); swap_using_evaluator(mat1.block(0, 0, 3, 3), mat2.block(3, 3, 3, 3)); mat1ref.block(0, 0, 3, 3).swap(mat2ref.block(3, 3, 3, 3)); VERIFY_IS_APPROX(mat1, mat1ref); VERIFY_IS_APPROX(mat2, mat2ref); swap_using_evaluator(mat1.row(2), mat2.col(3).transpose()); mat1.row(2).swap(mat2.col(3).transpose()); VERIFY_IS_APPROX(mat1, mat1ref); VERIFY_IS_APPROX(mat2, mat2ref); } { // test compound assignment const Matrix4d mat_const = Matrix4d::Random(); Matrix4d mat, mat_ref; mat = mat_ref = Matrix4d::Identity(); add_assign_using_evaluator(mat, 
mat_const); mat_ref += mat_const; VERIFY_IS_APPROX(mat, mat_ref); subtract_assign_using_evaluator(mat.row(1), 2*mat.row(2)); mat_ref.row(1) -= 2*mat_ref.row(2); VERIFY_IS_APPROX(mat, mat_ref); const ArrayXXf arr_const = ArrayXXf::Random(5,3); ArrayXXf arr, arr_ref; arr = arr_ref = ArrayXXf::Constant(5, 3, 0.5); multiply_assign_using_evaluator(arr, arr_const); arr_ref *= arr_const; VERIFY_IS_APPROX(arr, arr_ref); divide_assign_using_evaluator(arr.row(1), arr.row(2) + 1); arr_ref.row(1) /= (arr_ref.row(2) + 1); VERIFY_IS_APPROX(arr, arr_ref); } { // test triangular shapes MatrixXd A = MatrixXd::Random(6,6), B(6,6), C(6,6), D(6,6); A.setRandom();B.setRandom(); VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView(), MatrixXd(A.triangularView())); A.setRandom();B.setRandom(); VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView(), MatrixXd(A.triangularView())); A.setRandom();B.setRandom(); VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView(), MatrixXd(A.triangularView())); A.setRandom();B.setRandom(); C = B; C.triangularView() = A; copy_using_evaluator(B.triangularView(), A); VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView(), A)"); A.setRandom();B.setRandom(); C = B; C.triangularView() = A.triangularView(); copy_using_evaluator(B.triangularView(), A.triangularView()); VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView(), A.triangularView())"); A.setRandom();B.setRandom(); C = B; C.triangularView() = A.triangularView().transpose(); copy_using_evaluator(B.triangularView(), A.triangularView().transpose()); VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView(), A.triangularView().transpose())"); A.setRandom();B.setRandom(); C = B; D = A; C.triangularView().swap(D.triangularView()); swap_using_evaluator(B.triangularView(), A.triangularView()); VERIFY(B.isApprox(C) && "swap_using_evaluator(B.triangularView(), A.triangularView())"); VERIFY_IS_APPROX_EVALUATOR2(B, prod(A.triangularView(),A), MatrixXd(A.triangularView()*A)); 
VERIFY_IS_APPROX_EVALUATOR2(B, prod(A.selfadjointView(),A), MatrixXd(A.selfadjointView()*A)); } { // test diagonal shapes VectorXd d = VectorXd::Random(6); MatrixXd A = MatrixXd::Random(6,6), B(6,6); A.setRandom();B.setRandom(); VERIFY_IS_APPROX_EVALUATOR2(B, lazyprod(d.asDiagonal(),A), MatrixXd(d.asDiagonal()*A)); VERIFY_IS_APPROX_EVALUATOR2(B, lazyprod(A,d.asDiagonal()), MatrixXd(A*d.asDiagonal())); } { // test CoeffReadCost Matrix4d a, b; VERIFY_IS_EQUAL( get_cost(a), 1 ); VERIFY_IS_EQUAL( get_cost(a+b), 3); VERIFY_IS_EQUAL( get_cost(2*a+b), 4); VERIFY_IS_EQUAL( get_cost(a*b), 1); VERIFY_IS_EQUAL( get_cost(a.lazyProduct(b)), 15); VERIFY_IS_EQUAL( get_cost(a*(a*b)), 1); VERIFY_IS_EQUAL( get_cost(a.lazyProduct(a*b)), 15); VERIFY_IS_EQUAL( get_cost(a*(a+b)), 1); VERIFY_IS_EQUAL( get_cost(a.lazyProduct(a+b)), 15); } // regression test for PR 544 and bug 1622 (introduced in #71609c4) { // test restricted_packet_assignment with an unaligned destination const size_t M = 2; const size_t K = 2; const size_t N = 5; float *destMem = new float[(M*N) + 1]; float *dest = (internal::UIntPtr(destMem)%EIGEN_MAX_ALIGN_BYTES) == 0 ? destMem+1 : destMem; const Matrix a = Matrix::Random(M, K); const Matrix b = Matrix::Random(K, N); Map > z(dest, M, N);; Product, Matrix, LazyProduct> tmp(a,b); internal::call_restricted_packet_assignment(z.noalias(), tmp.derived(), internal::assign_op()); VERIFY_IS_APPROX(z, a*b); delete[] destMem; } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/exceptions.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
// Various sanity tests with exceptions and non trivially copyable scalar type. // - no memory leak when a custom scalar type trow an exceptions // - todo: complete the list of tests! #define EIGEN_STACK_ALLOCATION_LIMIT 100000000 #include "main.h" #include "AnnoyingScalar.h" #define CHECK_MEMLEAK(OP) { \ AnnoyingScalar::countdown = 100; \ int before = AnnoyingScalar::instances; \ bool exception_thrown = false; \ try { OP; } \ catch (my_exception) { \ exception_thrown = true; \ VERIFY(AnnoyingScalar::instances==before && "memory leak detected in " && EIGEN_MAKESTRING(OP)); \ } \ VERIFY( (AnnoyingScalar::dont_throw) || (exception_thrown && " no exception thrown in " && EIGEN_MAKESTRING(OP)) ); \ } EIGEN_DECLARE_TEST(exceptions) { typedef Eigen::Matrix VectorType; typedef Eigen::Matrix MatrixType; { AnnoyingScalar::dont_throw = false; int n = 50; VectorType v0(n), v1(n); MatrixType m0(n,n), m1(n,n), m2(n,n); v0.setOnes(); v1.setOnes(); m0.setOnes(); m1.setOnes(); m2.setOnes(); CHECK_MEMLEAK(v0 = m0 * m1 * v1); CHECK_MEMLEAK(m2 = m0 * m1 * m2); CHECK_MEMLEAK((v0+v1).dot(v0+v1)); } VERIFY(AnnoyingScalar::instances==0 && "global memory leak detected in " && EIGEN_MAKESTRING(OP)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/fastmath.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" void check(bool b, bool ref) { std::cout << b; if(b==ref) std::cout << " OK "; else std::cout << " BAD "; } #if EIGEN_COMP_MSVC && EIGEN_COMP_MSVC < 1800 namespace std { template bool (isfinite)(T x) { return _finite(x); } template bool (isnan)(T x) { return _isnan(x); } template bool (isinf)(T x) { return _fpclass(x)==_FPCLASS_NINF || _fpclass(x)==_FPCLASS_PINF; } } #endif template void check_inf_nan(bool dryrun) { Matrix m(10); m.setRandom(); m(3) = std::numeric_limits::quiet_NaN(); if(dryrun) { std::cout << "std::isfinite(" << m(3) << ") = "; check((std::isfinite)(m(3)),false); std::cout << " ; numext::isfinite = "; check((numext::isfinite)(m(3)), false); std::cout << "\n"; std::cout << "std::isinf(" << m(3) << ") = "; check((std::isinf)(m(3)),false); std::cout << " ; numext::isinf = "; check((numext::isinf)(m(3)), false); std::cout << "\n"; std::cout << "std::isnan(" << m(3) << ") = "; check((std::isnan)(m(3)),true); std::cout << " ; numext::isnan = "; check((numext::isnan)(m(3)), true); std::cout << "\n"; std::cout << "allFinite: "; check(m.allFinite(), 0); std::cout << "\n"; std::cout << "hasNaN: "; check(m.hasNaN(), 1); std::cout << "\n"; std::cout << "\n"; } else { if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !(numext::isfinite)(m(3)) ); g_test_level=0; if( (std::isinf) (m(3))) g_test_level=1; VERIFY( !(numext::isinf)(m(3)) ); g_test_level=0; if(!(std::isnan) (m(3))) g_test_level=1; VERIFY( (numext::isnan)(m(3)) ); g_test_level=0; if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; if(!(std::isnan) (m(3))) g_test_level=1; VERIFY( m.hasNaN() ); g_test_level=0; } T hidden_zero = (std::numeric_limits::min)()*(std::numeric_limits::min)(); m(4) /= hidden_zero; if(dryrun) { std::cout << "std::isfinite(" << m(4) << ") = "; check((std::isfinite)(m(4)),false); std::cout << " ; numext::isfinite = "; check((numext::isfinite)(m(4)), false); std::cout << "\n"; std::cout << "std::isinf(" << m(4) << ") = "; 
check((std::isinf)(m(4)),true); std::cout << " ; numext::isinf = "; check((numext::isinf)(m(4)), true); std::cout << "\n"; std::cout << "std::isnan(" << m(4) << ") = "; check((std::isnan)(m(4)),false); std::cout << " ; numext::isnan = "; check((numext::isnan)(m(4)), false); std::cout << "\n"; std::cout << "allFinite: "; check(m.allFinite(), 0); std::cout << "\n"; std::cout << "hasNaN: "; check(m.hasNaN(), 1); std::cout << "\n"; std::cout << "\n"; } else { if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !(numext::isfinite)(m(4)) ); g_test_level=0; if(!(std::isinf) (m(3))) g_test_level=1; VERIFY( (numext::isinf)(m(4)) ); g_test_level=0; if( (std::isnan) (m(3))) g_test_level=1; VERIFY( !(numext::isnan)(m(4)) ); g_test_level=0; if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; if(!(std::isnan) (m(3))) g_test_level=1; VERIFY( m.hasNaN() ); g_test_level=0; } m(3) = 0; if(dryrun) { std::cout << "std::isfinite(" << m(3) << ") = "; check((std::isfinite)(m(3)),true); std::cout << " ; numext::isfinite = "; check((numext::isfinite)(m(3)), true); std::cout << "\n"; std::cout << "std::isinf(" << m(3) << ") = "; check((std::isinf)(m(3)),false); std::cout << " ; numext::isinf = "; check((numext::isinf)(m(3)), false); std::cout << "\n"; std::cout << "std::isnan(" << m(3) << ") = "; check((std::isnan)(m(3)),false); std::cout << " ; numext::isnan = "; check((numext::isnan)(m(3)), false); std::cout << "\n"; std::cout << "allFinite: "; check(m.allFinite(), 0); std::cout << "\n"; std::cout << "hasNaN: "; check(m.hasNaN(), 0); std::cout << "\n"; std::cout << "\n\n"; } else { if(!(std::isfinite)(m(3))) g_test_level=1; VERIFY( (numext::isfinite)(m(3)) ); g_test_level=0; if( (std::isinf) (m(3))) g_test_level=1; VERIFY( !(numext::isinf)(m(3)) ); g_test_level=0; if( (std::isnan) (m(3))) g_test_level=1; VERIFY( !(numext::isnan)(m(3)) ); g_test_level=0; if( (std::isfinite)(m(3))) g_test_level=1; VERIFY( !m.allFinite() ); g_test_level=0; if( (std::isnan) 
(m(3))) g_test_level=1; VERIFY( !m.hasNaN() ); g_test_level=0; } } EIGEN_DECLARE_TEST(fastmath) { std::cout << "*** float *** \n\n"; check_inf_nan(true); std::cout << "*** double ***\n\n"; check_inf_nan(true); std::cout << "*** long double *** \n\n"; check_inf_nan(true); check_inf_nan(false); check_inf_nan(false); check_inf_nan(false); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/first_aligned.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void test_first_aligned_helper(Scalar *array, int size) { const int packet_size = sizeof(Scalar) * internal::packet_traits::size; VERIFY(((size_t(array) + sizeof(Scalar) * internal::first_default_aligned(array, size)) % packet_size) == 0); } template void test_none_aligned_helper(Scalar *array, int size) { EIGEN_UNUSED_VARIABLE(array); EIGEN_UNUSED_VARIABLE(size); VERIFY(internal::packet_traits::size == 1 || internal::first_default_aligned(array, size) == size); } struct some_non_vectorizable_type { float x; }; EIGEN_DECLARE_TEST(first_aligned) { EIGEN_ALIGN16 float array_float[100]; test_first_aligned_helper(array_float, 50); test_first_aligned_helper(array_float+1, 50); test_first_aligned_helper(array_float+2, 50); test_first_aligned_helper(array_float+3, 50); test_first_aligned_helper(array_float+4, 50); test_first_aligned_helper(array_float+5, 50); EIGEN_ALIGN16 double array_double[100]; test_first_aligned_helper(array_double, 50); test_first_aligned_helper(array_double+1, 50); test_first_aligned_helper(array_double+2, 50); double *array_double_plus_4_bytes = (double*)(internal::UIntPtr(array_double)+4); 
test_none_aligned_helper(array_double_plus_4_bytes, 50); test_none_aligned_helper(array_double_plus_4_bytes+1, 50); some_non_vectorizable_type array_nonvec[100]; test_first_aligned_helper(array_nonvec, 100); test_none_aligned_helper(array_nonvec, 100); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_alignedbox.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include using namespace std; // NOTE the following workaround was needed on some 32 bits builds to kill extra precision of x87 registers. // It seems that it is not needed anymore, but let's keep it here, just in case... 
template EIGEN_DONT_INLINE void kill_extra_precision(T& /* x */) { // This one worked but triggered a warning: /* eigen_assert((void*)(&x) != (void*)0); */ // An alternative could be: /* volatile T tmp = x; */ /* x = tmp; */ } template void alignedbox(const BoxType& box) { /* this test covers the following files: AlignedBox.h */ typedef typename BoxType::Scalar Scalar; typedef NumTraits ScalarTraits; typedef typename ScalarTraits::Real RealScalar; typedef Matrix VectorType; const Index dim = box.dim(); VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); while( p1 == p0 ){ p1 = VectorType::Random(dim); } RealScalar s1 = internal::random(0,1); BoxType b0(dim); BoxType b1(VectorType::Random(dim),VectorType::Random(dim)); BoxType b2; kill_extra_precision(b1); kill_extra_precision(p0); kill_extra_precision(p1); b0.extend(p0); b0.extend(p1); VERIFY(b0.contains(p0*s1+(Scalar(1)-s1)*p1)); VERIFY(b0.contains(b0.center())); VERIFY_IS_APPROX(b0.center(),(p0+p1)/Scalar(2)); (b2 = b0).extend(b1); VERIFY(b2.contains(b0)); VERIFY(b2.contains(b1)); VERIFY_IS_APPROX(b2.clamp(b0), b0); // intersection BoxType box1(VectorType::Random(dim)); box1.extend(VectorType::Random(dim)); BoxType box2(VectorType::Random(dim)); box2.extend(VectorType::Random(dim)); VERIFY(box1.intersects(box2) == !box1.intersection(box2).isEmpty()); // alignment -- make sure there is no memory alignment assertion BoxType *bp0 = new BoxType(dim); BoxType *bp1 = new BoxType(dim); bp0->extend(*bp1); delete bp0; delete bp1; // sampling for( int i=0; i<10; ++i ) { VectorType r = b0.sample(); VERIFY(b0.contains(r)); } } template void alignedboxTranslatable(const BoxType& box) { typedef typename BoxType::Scalar Scalar; typedef Matrix VectorType; typedef Transform IsometryTransform; typedef Transform AffineTransform; alignedbox(box); const VectorType Ones = VectorType::Ones(); const VectorType UnitX = VectorType::UnitX(); const Index dim = box.dim(); // box((-1, -1, -1), (1, 1, 1)) BoxType 
a(-Ones, Ones); VERIFY_IS_APPROX(a.sizes(), Ones * Scalar(2)); BoxType b = a; VectorType translate = Ones; translate[0] = Scalar(2); b.translate(translate); // translate by (2, 1, 1) -> box((1, 0, 0), (3, 2, 2)) VERIFY_IS_APPROX(b.sizes(), Ones * Scalar(2)); VERIFY_IS_APPROX((b.min)(), UnitX); VERIFY_IS_APPROX((b.max)(), Ones * Scalar(2) + UnitX); // Test transform IsometryTransform tf = IsometryTransform::Identity(); tf.translation() = -translate; BoxType c = b.transformed(tf); // translate by (-2, -1, -1) -> box((-1, -1, -1), (1, 1, 1)) VERIFY_IS_APPROX(c.sizes(), a.sizes()); VERIFY_IS_APPROX((c.min)(), (a.min)()); VERIFY_IS_APPROX((c.max)(), (a.max)()); c.transform(tf); // translate by (-2, -1, -1) -> box((-3, -2, -2), (-1, 0, 0)) VERIFY_IS_APPROX(c.sizes(), a.sizes()); VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-2) - UnitX); VERIFY_IS_APPROX((c.max)(), -UnitX); // Scaling AffineTransform atf = AffineTransform::Identity(); atf.scale(Scalar(3)); c.transform(atf); // scale by 3 -> box((-9, -6, -6), (-3, 0, 0)) VERIFY_IS_APPROX(c.sizes(), Scalar(3) * a.sizes()); VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-6) - UnitX * Scalar(3)); VERIFY_IS_APPROX((c.max)(), UnitX * Scalar(-3)); atf = AffineTransform::Identity(); atf.scale(Scalar(-3)); c.transform(atf); // scale by -3 -> box((27, 18, 18), (9, 0, 0)) VERIFY_IS_APPROX(c.sizes(), Scalar(9) * a.sizes()); VERIFY_IS_APPROX((c.min)(), UnitX * Scalar(9)); VERIFY_IS_APPROX((c.max)(), Ones * Scalar(18) + UnitX * Scalar(9)); // Check identity transform within numerical precision. 
BoxType transformedC = c.transformed(IsometryTransform::Identity()); VERIFY_IS_APPROX(transformedC, c); for (size_t i = 0; i < 10; ++i) { VectorType minCorner; VectorType maxCorner; for (Index d = 0; d < dim; ++d) { minCorner[d] = internal::random(-10,10); maxCorner[d] = minCorner[d] + internal::random(0, 10); } c = BoxType(minCorner, maxCorner); translate = VectorType::Random(); c.translate(translate); VERIFY_IS_APPROX((c.min)(), minCorner + translate); VERIFY_IS_APPROX((c.max)(), maxCorner + translate); } } template Rotation rotate2D(Scalar angle) { return Rotation2D(angle); } template Rotation rotate2DIntegral(typename NumTraits::NonInteger angle) { typedef typename NumTraits::NonInteger NonInteger; return Rotation2D(angle).toRotationMatrix(). template cast(); } template Rotation rotate3DZAxis(Scalar angle) { return AngleAxis(angle, Matrix(0, 0, 1)); } template Rotation rotate3DZAxisIntegral(typename NumTraits::NonInteger angle) { typedef typename NumTraits::NonInteger NonInteger; return AngleAxis(angle, Matrix(0, 0, 1)). 
toRotationMatrix().template cast(); } template Rotation rotate4DZWAxis(Scalar angle) { Rotation result = Matrix::Identity(); result.block(0, 0, 3, 3) = rotate3DZAxis(angle).toRotationMatrix(); return result; } template MatrixType randomRotationMatrix() { // algorithm from // https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/III-7/103/2016/isprs-annals-III-7-103-2016.pdf const MatrixType rand = MatrixType::Random(); const MatrixType q = rand.householderQr().householderQ(); const JacobiSVD svd = q.jacobiSvd(ComputeFullU | ComputeFullV); const typename MatrixType::Scalar det = (svd.matrixU() * svd.matrixV().transpose()).determinant(); MatrixType diag = rand.Identity(); diag(MatrixType::RowsAtCompileTime - 1, MatrixType::ColsAtCompileTime - 1) = det; const MatrixType rotation = svd.matrixU() * diag * svd.matrixV().transpose(); return rotation; } template Matrix boxGetCorners(const Matrix& min_, const Matrix& max_) { Matrix result; for(Index i=0; i<(1< void alignedboxRotatable( const BoxType& box, Rotation (*rotate)(typename NumTraits::NonInteger /*_angle*/)) { alignedboxTranslatable(box); typedef typename BoxType::Scalar Scalar; typedef typename NumTraits::NonInteger NonInteger; typedef Matrix VectorType; typedef Transform IsometryTransform; typedef Transform AffineTransform; const VectorType Zero = VectorType::Zero(); const VectorType Ones = VectorType::Ones(); const VectorType UnitX = VectorType::UnitX(); const VectorType UnitY = VectorType::UnitY(); // this is vector (0, 0, -1, -1, -1, ...), i.e. 
with zeros at first and second dimensions const VectorType UnitZ = Ones - UnitX - UnitY; // in this kind of comments the 3D case values will be illustrated // box((-1, -1, -1), (1, 1, 1)) BoxType a(-Ones, Ones); // to allow templating this test for both 2D and 3D cases, we always set all // but the first coordinate to the same value; so basically 3D case works as // if you were looking at the scene from top VectorType minPoint = -2 * Ones; minPoint[0] = -3; VectorType maxPoint = Zero; maxPoint[0] = -1; BoxType c(minPoint, maxPoint); // box((-3, -2, -2), (-1, 0, 0)) IsometryTransform tf2 = IsometryTransform::Identity(); // for some weird reason the following statement has to be put separate from // the following rotate call, otherwise precision problems arise... Rotation rot = rotate(NonInteger(EIGEN_PI)); tf2.rotate(rot); c.transform(tf2); // rotate by 180 deg around origin -> box((1, 0, -2), (3, 2, 0)) VERIFY_IS_APPROX(c.sizes(), a.sizes()); VERIFY_IS_APPROX((c.min)(), UnitX - UnitZ * Scalar(2)); VERIFY_IS_APPROX((c.max)(), UnitX * Scalar(3) + UnitY * Scalar(2)); rot = rotate(NonInteger(EIGEN_PI / 2)); tf2.setIdentity(); tf2.rotate(rot); c.transform(tf2); // rotate by 90 deg around origin -> box((-2, 1, -2), (0, 3, 0)) VERIFY_IS_APPROX(c.sizes(), a.sizes()); VERIFY_IS_APPROX((c.min)(), Ones * Scalar(-2) + UnitY * Scalar(3)); VERIFY_IS_APPROX((c.max)(), UnitY * Scalar(3)); // box((-1, -1, -1), (1, 1, 1)) AffineTransform atf = AffineTransform::Identity(); atf.linearExt()(0, 1) = Scalar(1); c = BoxType(-Ones, Ones); c.transform(atf); // 45 deg shear in x direction -> box((-2, -1, -1), (2, 1, 1)) VERIFY_IS_APPROX(c.sizes(), Ones * Scalar(2) + UnitX * Scalar(2)); VERIFY_IS_APPROX((c.min)(), -Ones - UnitX); VERIFY_IS_APPROX((c.max)(), Ones + UnitX); } template void alignedboxNonIntegralRotatable( const BoxType& box, Rotation (*rotate)(typename NumTraits::NonInteger /*_angle*/)) { alignedboxRotatable(box, rotate); typedef typename BoxType::Scalar Scalar; typedef typename 
NumTraits::NonInteger NonInteger; enum { Dim = BoxType::AmbientDimAtCompileTime }; typedef Matrix VectorType; typedef Matrix CornersType; typedef Transform IsometryTransform; typedef Transform AffineTransform; const Index dim = box.dim(); const VectorType Zero = VectorType::Zero(); const VectorType Ones = VectorType::Ones(); VectorType minPoint = -2 * Ones; minPoint[1] = 1; VectorType maxPoint = Zero; maxPoint[1] = 3; BoxType c(minPoint, maxPoint); // ((-2, 1, -2), (0, 3, 0)) VectorType cornerBL = (c.min)(); VectorType cornerTR = (c.max)(); VectorType cornerBR = (c.min)(); cornerBR[0] = cornerTR[0]; VectorType cornerTL = (c.max)(); cornerTL[0] = cornerBL[0]; NonInteger angle = NonInteger(EIGEN_PI/3); Rotation rot = rotate(angle); IsometryTransform tf2; tf2.setIdentity(); tf2.rotate(rot); c.transform(tf2); // rotate by 60 deg -> box((-3.59, -1.23, -2), (-0.86, 1.5, 0)) cornerBL = tf2 * cornerBL; cornerBR = tf2 * cornerBR; cornerTL = tf2 * cornerTL; cornerTR = tf2 * cornerTR; VectorType minCorner = Ones * Scalar(-2); VectorType maxCorner = Zero; minCorner[0] = (min)((min)(cornerBL[0], cornerBR[0]), (min)(cornerTL[0], cornerTR[0])); maxCorner[0] = (max)((max)(cornerBL[0], cornerBR[0]), (max)(cornerTL[0], cornerTR[0])); minCorner[1] = (min)((min)(cornerBL[1], cornerBR[1]), (min)(cornerTL[1], cornerTR[1])); maxCorner[1] = (max)((max)(cornerBL[1], cornerBR[1]), (max)(cornerTL[1], cornerTR[1])); for (Index d = 2; d < dim; ++d) VERIFY_IS_APPROX(c.sizes()[d], Scalar(2)); VERIFY_IS_APPROX((c.min)(), minCorner); VERIFY_IS_APPROX((c.max)(), maxCorner); VectorType minCornerValue = Ones * Scalar(-2); VectorType maxCornerValue = Zero; minCornerValue[0] = Scalar(Scalar(-sqrt(2*2 + 3*3)) * Scalar(cos(Scalar(atan(2.0/3.0)) - angle/2))); minCornerValue[1] = Scalar(Scalar(-sqrt(1*1 + 2*2)) * Scalar(sin(Scalar(atan(2.0/1.0)) - angle/2))); maxCornerValue[0] = Scalar(-sin(angle)); maxCornerValue[1] = Scalar(3 * cos(angle)); VERIFY_IS_APPROX((c.min)(), minCornerValue); 
VERIFY_IS_APPROX((c.max)(), maxCornerValue); // randomized test - translate and rotate the box and compare to a box made of transformed vertices for (size_t i = 0; i < 10; ++i) { for (Index d = 0; d < dim; ++d) { minCorner[d] = internal::random(-10,10); maxCorner[d] = minCorner[d] + internal::random(0, 10); } c = BoxType(minCorner, maxCorner); CornersType corners = boxGetCorners(minCorner, maxCorner); typename AffineTransform::LinearMatrixType rotation = randomRotationMatrix(); tf2.setIdentity(); tf2.rotate(rotation); tf2.translate(VectorType::Random()); c.transform(tf2); corners = tf2 * corners; minCorner = corners.rowwise().minCoeff(); maxCorner = corners.rowwise().maxCoeff(); VERIFY_IS_APPROX((c.min)(), minCorner); VERIFY_IS_APPROX((c.max)(), maxCorner); } // randomized test - transform the box with a random affine matrix and compare to a box made of transformed vertices for (size_t i = 0; i < 10; ++i) { for (Index d = 0; d < dim; ++d) { minCorner[d] = internal::random(-10,10); maxCorner[d] = minCorner[d] + internal::random(0, 10); } c = BoxType(minCorner, maxCorner); CornersType corners = boxGetCorners(minCorner, maxCorner); AffineTransform atf = AffineTransform::Identity(); atf.linearExt() = AffineTransform::LinearPart::Random(); atf.translate(VectorType::Random()); c.transform(atf); corners = atf * corners; minCorner = corners.rowwise().minCoeff(); maxCorner = corners.rowwise().maxCoeff(); VERIFY_IS_APPROX((c.min)(), minCorner); VERIFY_IS_APPROX((c.max)(), maxCorner); } } template void alignedboxCastTests(const BoxType& box) { // casting typedef typename BoxType::Scalar Scalar; typedef Matrix VectorType; const Index dim = box.dim(); VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); BoxType b0(dim); b0.extend(p0); b0.extend(p1); const int Dim = BoxType::AmbientDimAtCompileTime; typedef typename GetDifferentType::type OtherScalar; AlignedBox hp1f = b0.template cast(); VERIFY_IS_APPROX(hp1f.template cast(),b0); AlignedBox hp1d = 
b0.template cast(); VERIFY_IS_APPROX(hp1d.template cast(),b0); } void specificTest1() { Vector2f m; m << -1.0f, -2.0f; Vector2f M; M << 1.0f, 5.0f; typedef AlignedBox2f BoxType; BoxType box( m, M ); Vector2f sides = M-m; VERIFY_IS_APPROX(sides, box.sizes() ); VERIFY_IS_APPROX(sides[1], box.sizes()[1] ); VERIFY_IS_APPROX(sides[1], box.sizes().maxCoeff() ); VERIFY_IS_APPROX(sides[0], box.sizes().minCoeff() ); VERIFY_IS_APPROX( 14.0f, box.volume() ); VERIFY_IS_APPROX( 53.0f, box.diagonal().squaredNorm() ); VERIFY_IS_APPROX( std::sqrt( 53.0f ), box.diagonal().norm() ); VERIFY_IS_APPROX( m, box.corner( BoxType::BottomLeft ) ); VERIFY_IS_APPROX( M, box.corner( BoxType::TopRight ) ); Vector2f bottomRight; bottomRight << M[0], m[1]; Vector2f topLeft; topLeft << m[0], M[1]; VERIFY_IS_APPROX( bottomRight, box.corner( BoxType::BottomRight ) ); VERIFY_IS_APPROX( topLeft, box.corner( BoxType::TopLeft ) ); } void specificTest2() { Vector3i m; m << -1, -2, 0; Vector3i M; M << 1, 5, 3; typedef AlignedBox3i BoxType; BoxType box( m, M ); Vector3i sides = M-m; VERIFY_IS_APPROX(sides, box.sizes() ); VERIFY_IS_APPROX(sides[1], box.sizes()[1] ); VERIFY_IS_APPROX(sides[1], box.sizes().maxCoeff() ); VERIFY_IS_APPROX(sides[0], box.sizes().minCoeff() ); VERIFY_IS_APPROX( 42, box.volume() ); VERIFY_IS_APPROX( 62, box.diagonal().squaredNorm() ); VERIFY_IS_APPROX( m, box.corner( BoxType::BottomLeftFloor ) ); VERIFY_IS_APPROX( M, box.corner( BoxType::TopRightCeil ) ); Vector3i bottomRightFloor; bottomRightFloor << M[0], m[1], m[2]; Vector3i topLeftFloor; topLeftFloor << m[0], M[1], m[2]; VERIFY_IS_APPROX( bottomRightFloor, box.corner( BoxType::BottomRightFloor ) ); VERIFY_IS_APPROX( topLeftFloor, box.corner( BoxType::TopLeftFloor ) ); } EIGEN_DECLARE_TEST(geo_alignedbox) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( (alignedboxNonIntegralRotatable(AlignedBox2f(), &rotate2D)) ); CALL_SUBTEST_2( alignedboxCastTests(AlignedBox2f()) ); CALL_SUBTEST_3( 
(alignedboxNonIntegralRotatable(AlignedBox3f(), &rotate3DZAxis)) ); CALL_SUBTEST_4( alignedboxCastTests(AlignedBox3f()) ); CALL_SUBTEST_5( (alignedboxNonIntegralRotatable(AlignedBox4d(), &rotate4DZWAxis)) ); CALL_SUBTEST_6( alignedboxCastTests(AlignedBox4d()) ); CALL_SUBTEST_7( alignedboxTranslatable(AlignedBox1d()) ); CALL_SUBTEST_8( alignedboxCastTests(AlignedBox1d()) ); CALL_SUBTEST_9( alignedboxTranslatable(AlignedBox1i()) ); CALL_SUBTEST_10( (alignedboxRotatable(AlignedBox2i(), &rotate2DIntegral)) ); CALL_SUBTEST_11( (alignedboxRotatable(AlignedBox3i(), &rotate3DZAxisIntegral)) ); CALL_SUBTEST_14( alignedbox(AlignedBox(4)) ); } CALL_SUBTEST_12( specificTest1() ); CALL_SUBTEST_13( specificTest2() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_eulerangles.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include template void verify_euler(const Matrix& ea, int i, int j, int k) { typedef Matrix Matrix3; typedef Matrix Vector3; typedef AngleAxis AngleAxisx; using std::abs; Matrix3 m(AngleAxisx(ea[0], Vector3::Unit(i)) * AngleAxisx(ea[1], Vector3::Unit(j)) * AngleAxisx(ea[2], Vector3::Unit(k))); Vector3 eabis = m.eulerAngles(i, j, k); Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit(i)) * AngleAxisx(eabis[1], Vector3::Unit(j)) * AngleAxisx(eabis[2], Vector3::Unit(k))); VERIFY_IS_APPROX(m, mbis); /* If I==K, and ea[1]==0, then there no unique solution. */ /* The remark apply in the case where I!=K, and |ea[1]| is close to pi/2. 
*/ if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(EIGEN_PI/2),test_precision())) ) VERIFY((ea-eabis).norm() <= test_precision()); // approx_or_less_than does not work for 0 VERIFY(0 < eabis[0] || test_isMuchSmallerThan(eabis[0], Scalar(1))); VERIFY_IS_APPROX_OR_LESS_THAN(eabis[0], Scalar(EIGEN_PI)); VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(EIGEN_PI), eabis[1]); VERIFY_IS_APPROX_OR_LESS_THAN(eabis[1], Scalar(EIGEN_PI)); VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(EIGEN_PI), eabis[2]); VERIFY_IS_APPROX_OR_LESS_THAN(eabis[2], Scalar(EIGEN_PI)); } template void check_all_var(const Matrix& ea) { verify_euler(ea, 0,1,2); verify_euler(ea, 0,1,0); verify_euler(ea, 0,2,1); verify_euler(ea, 0,2,0); verify_euler(ea, 1,2,0); verify_euler(ea, 1,2,1); verify_euler(ea, 1,0,2); verify_euler(ea, 1,0,1); verify_euler(ea, 2,0,1); verify_euler(ea, 2,0,2); verify_euler(ea, 2,1,0); verify_euler(ea, 2,1,2); } template void eulerangles() { typedef Matrix Matrix3; typedef Matrix Vector3; typedef Array Array3; typedef Quaternion Quaternionx; typedef AngleAxis AngleAxisx; Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); Quaternionx q1; q1 = AngleAxisx(a, Vector3::Random().normalized()); Matrix3 m; m = q1; Vector3 ea = m.eulerAngles(0,1,2); check_all_var(ea); ea = m.eulerAngles(0,1,0); check_all_var(ea); // Check with purely random Quaternion: q1.coeffs() = Quaternionx::Coefficients::Random().normalized(); m = q1; ea = m.eulerAngles(0,1,2); check_all_var(ea); ea = m.eulerAngles(0,1,0); check_all_var(ea); // Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi]. 
ea = (Array3::Random() + Array3(1,0,0))*Scalar(EIGEN_PI)*Array3(0.5,1,1); check_all_var(ea); ea[2] = ea[0] = internal::random(0,Scalar(EIGEN_PI)); check_all_var(ea); ea[0] = ea[1] = internal::random(0,Scalar(EIGEN_PI)); check_all_var(ea); ea[1] = 0; check_all_var(ea); ea.head(2).setZero(); check_all_var(ea); ea.setZero(); check_all_var(ea); } EIGEN_DECLARE_TEST(geo_eulerangles) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eulerangles() ); CALL_SUBTEST_2( eulerangles() ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_homogeneous.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include template void homogeneous(void) { /* this test covers the following files: Homogeneous.h */ typedef Matrix MatrixType; typedef Matrix VectorType; typedef Matrix HMatrixType; typedef Matrix HVectorType; typedef Matrix T1MatrixType; typedef Matrix T2MatrixType; typedef Matrix T3MatrixType; VectorType v0 = VectorType::Random(), ones = VectorType::Ones(); HVectorType hv0 = HVectorType::Random(); MatrixType m0 = MatrixType::Random(); HMatrixType hm0 = HMatrixType::Random(); hv0 << v0, 1; VERIFY_IS_APPROX(v0.homogeneous(), hv0); VERIFY_IS_APPROX(v0, hv0.hnormalized()); VERIFY_IS_APPROX(v0.homogeneous().sum(), hv0.sum()); VERIFY_IS_APPROX(v0.homogeneous().minCoeff(), hv0.minCoeff()); VERIFY_IS_APPROX(v0.homogeneous().maxCoeff(), hv0.maxCoeff()); hm0 << m0, ones.transpose(); VERIFY_IS_APPROX(m0.colwise().homogeneous(), hm0); VERIFY_IS_APPROX(m0, hm0.colwise().hnormalized()); hm0.row(Size-1).setRandom(); for(int j=0; j aff; Transform caff; Transform proj; Matrix 
pts; Matrix pts1, pts2; aff.affine().setRandom(); proj = caff = aff; pts.setRandom(Size,internal::random(1,20)); pts1 = pts.colwise().homogeneous(); VERIFY_IS_APPROX(aff * pts.colwise().homogeneous(), (aff * pts1).colwise().hnormalized()); VERIFY_IS_APPROX(caff * pts.colwise().homogeneous(), (caff * pts1).colwise().hnormalized()); VERIFY_IS_APPROX(proj * pts.colwise().homogeneous(), (proj * pts1)); VERIFY_IS_APPROX((aff * pts1).colwise().hnormalized(), aff * pts); VERIFY_IS_APPROX((caff * pts1).colwise().hnormalized(), caff * pts); pts2 = pts1; pts2.row(Size).setRandom(); VERIFY_IS_APPROX((aff * pts2).colwise().hnormalized(), aff * pts2.colwise().hnormalized()); VERIFY_IS_APPROX((caff * pts2).colwise().hnormalized(), caff * pts2.colwise().hnormalized()); VERIFY_IS_APPROX((proj * pts2).colwise().hnormalized(), (proj * pts2.colwise().hnormalized().colwise().homogeneous()).colwise().hnormalized()); // Test combination of homogeneous VERIFY_IS_APPROX( (t2 * v0.homogeneous()).hnormalized(), (t2.template topLeftCorner() * v0 + t2.template topRightCorner()) / ((t2.template bottomLeftCorner<1,Size>()*v0).value() + t2(Size,Size)) ); VERIFY_IS_APPROX( (t2 * pts.colwise().homogeneous()).colwise().hnormalized(), (Matrix(t2 * pts1).colwise().hnormalized()) ); VERIFY_IS_APPROX( (t2 .lazyProduct( v0.homogeneous() )).hnormalized(), (t2 * v0.homogeneous()).hnormalized() ); VERIFY_IS_APPROX( (t2 .lazyProduct ( pts.colwise().homogeneous() )).colwise().hnormalized(), (t2 * pts1).colwise().hnormalized() ); VERIFY_IS_APPROX( (v0.transpose().homogeneous() .lazyProduct( t2 )).hnormalized(), (v0.transpose().homogeneous()*t2).hnormalized() ); VERIFY_IS_APPROX( (pts.transpose().rowwise().homogeneous() .lazyProduct( t2 )).rowwise().hnormalized(), (pts1.transpose()*t2).rowwise().hnormalized() ); VERIFY_IS_APPROX( (t2.template triangularView() * v0.homogeneous()).eval(), (t2.template triangularView()*hv0) ); } EIGEN_DECLARE_TEST(geo_homogeneous) { for(int i = 0; i < g_repeat; i++) { 
CALL_SUBTEST_1(( homogeneous() )); CALL_SUBTEST_2(( homogeneous() )); CALL_SUBTEST_3(( homogeneous() )); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_hyperplane.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include template void hyperplane(const HyperplaneType& _plane) { /* this test covers the following files: Hyperplane.h */ using std::abs; const Index dim = _plane.dim(); enum { Options = HyperplaneType::Options }; typedef typename HyperplaneType::Scalar Scalar; typedef typename HyperplaneType::RealScalar RealScalar; typedef Matrix VectorType; typedef Matrix MatrixType; VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); VectorType n0 = VectorType::Random(dim).normalized(); VectorType n1 = VectorType::Random(dim).normalized(); HyperplaneType pl0(n0, p0); HyperplaneType pl1(n1, p1); HyperplaneType pl2 = pl1; Scalar s0 = internal::random(); Scalar s1 = internal::random(); VERIFY_IS_APPROX( n1.dot(n1), Scalar(1) ); VERIFY_IS_MUCH_SMALLER_THAN( pl0.absDistance(p0), Scalar(1) ); if(numext::abs2(s0)>RealScalar(1e-6)) VERIFY_IS_APPROX( pl1.signedDistance(p1 + n1 * s0), s0); else VERIFY_IS_MUCH_SMALLER_THAN( abs(pl1.signedDistance(p1 + n1 * s0) - s0), Scalar(1) ); VERIFY_IS_MUCH_SMALLER_THAN( pl1.signedDistance(pl1.projection(p0)), Scalar(1) ); VERIFY_IS_MUCH_SMALLER_THAN( pl1.absDistance(p1 + pl1.normal().unitOrthogonal() * s1), Scalar(1) ); // transform if (!NumTraits::IsComplex) { MatrixType rot = MatrixType::Random(dim,dim).householderQr().householderQ(); 
DiagonalMatrix scaling(VectorType::Random()); Translation translation(VectorType::Random()); while(scaling.diagonal().cwiseAbs().minCoeff()::type OtherScalar; Hyperplane hp1f = pl1.template cast(); VERIFY_IS_APPROX(hp1f.template cast(),pl1); Hyperplane hp1d = pl1.template cast(); VERIFY_IS_APPROX(hp1d.template cast(),pl1); } template void lines() { using std::abs; typedef Hyperplane HLine; typedef ParametrizedLine PLine; typedef Matrix Vector; typedef Matrix CoeffsType; for(int i = 0; i < 10; i++) { Vector center = Vector::Random(); Vector u = Vector::Random(); Vector v = Vector::Random(); Scalar a = internal::random(); while (abs(a-1) < Scalar(1e-4)) a = internal::random(); while (u.norm() < Scalar(1e-4)) u = Vector::Random(); while (v.norm() < Scalar(1e-4)) v = Vector::Random(); HLine line_u = HLine::Through(center + u, center + a*u); HLine line_v = HLine::Through(center + v, center + a*v); // the line equations should be normalized so that a^2+b^2=1 VERIFY_IS_APPROX(line_u.normal().norm(), Scalar(1)); VERIFY_IS_APPROX(line_v.normal().norm(), Scalar(1)); Vector result = line_u.intersection(line_v); // the lines should intersect at the point we called "center" if(abs(a-1) > Scalar(1e-2) && abs(v.normalized().dot(u.normalized())) void planes() { using std::abs; typedef Hyperplane Plane; typedef Matrix Vector; for(int i = 0; i < 10; i++) { Vector v0 = Vector::Random(); Vector v1(v0), v2(v0); if(internal::random(0,1)>0.25) v1 += Vector::Random(); if(internal::random(0,1)>0.25) v2 += v1 * std::pow(internal::random(0,1),internal::random(1,16)); if(internal::random(0,1)>0.25) v2 += Vector::Random() * std::pow(internal::random(0,1),internal::random(1,16)); Plane p0 = Plane::Through(v0, v1, v2); VERIFY_IS_APPROX(p0.normal().norm(), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v0), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v1), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v2), Scalar(1)); } } template void hyperplane_alignment() { typedef 
Hyperplane Plane3a; typedef Hyperplane Plane3u; EIGEN_ALIGN_MAX Scalar array1[4]; EIGEN_ALIGN_MAX Scalar array2[4]; EIGEN_ALIGN_MAX Scalar array3[4+1]; Scalar* array3u = array3+1; Plane3a *p1 = ::new(reinterpret_cast(array1)) Plane3a; Plane3u *p2 = ::new(reinterpret_cast(array2)) Plane3u; Plane3u *p3 = ::new(reinterpret_cast(array3u)) Plane3u; p1->coeffs().setRandom(); *p2 = *p1; *p3 = *p1; VERIFY_IS_APPROX(p1->coeffs(), p2->coeffs()); VERIFY_IS_APPROX(p1->coeffs(), p3->coeffs()); } EIGEN_DECLARE_TEST(geo_hyperplane) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( hyperplane(Hyperplane()) ); CALL_SUBTEST_2( hyperplane(Hyperplane()) ); CALL_SUBTEST_2( hyperplane(Hyperplane()) ); CALL_SUBTEST_2( hyperplane_alignment() ); CALL_SUBTEST_3( hyperplane(Hyperplane()) ); CALL_SUBTEST_4( hyperplane(Hyperplane,5>()) ); CALL_SUBTEST_1( lines() ); CALL_SUBTEST_3( lines() ); CALL_SUBTEST_2( planes() ); CALL_SUBTEST_5( planes() ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_orthomethods.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include #include #include /* this test covers the following files: Geometry/OrthoMethods.h */ template void orthomethods_3() { typedef typename NumTraits::Real RealScalar; typedef Matrix Matrix3; typedef Matrix Vector3; typedef Matrix Vector4; Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(), v2 = Vector3::Random(); // cross product VERIFY_IS_MUCH_SMALLER_THAN(v1.cross(v2).dot(v1), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(v1.dot(v1.cross(v2)), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(v1.cross(v2).dot(v2), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(v2.dot(v1.cross(v2)), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN(v1.cross(Vector3::Random()).dot(v1), Scalar(1)); Matrix3 mat3; mat3 << v0.normalized(), (v0.cross(v1)).normalized(), (v0.cross(v1).cross(v0)).normalized(); VERIFY(mat3.isUnitary()); mat3.setRandom(); VERIFY_IS_APPROX(v0.cross(mat3*v1), -(mat3*v1).cross(v0)); VERIFY_IS_APPROX(v0.cross(mat3.lazyProduct(v1)), -(mat3.lazyProduct(v1)).cross(v0)); // colwise/rowwise cross product mat3.setRandom(); Vector3 vec3 = Vector3::Random(); Matrix3 mcross; int i = internal::random(0,2); mcross = mat3.colwise().cross(vec3); VERIFY_IS_APPROX(mcross.col(i), mat3.col(i).cross(vec3)); VERIFY_IS_MUCH_SMALLER_THAN((mat3.adjoint() * mat3.colwise().cross(vec3)).diagonal().cwiseAbs().sum(), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN((mat3.adjoint() * mat3.colwise().cross(Vector3::Random())).diagonal().cwiseAbs().sum(), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN((vec3.adjoint() * mat3.colwise().cross(vec3)).cwiseAbs().sum(), Scalar(1)); VERIFY_IS_MUCH_SMALLER_THAN((vec3.adjoint() * Matrix3::Random().colwise().cross(vec3)).cwiseAbs().sum(), Scalar(1)); mcross = mat3.rowwise().cross(vec3); VERIFY_IS_APPROX(mcross.row(i), mat3.row(i).cross(vec3)); // cross3 Vector4 v40 = Vector4::Random(), v41 = Vector4::Random(), v42 = Vector4::Random(); v40.w() = v41.w() = v42.w() = 0; v42.template head<3>() = v40.template head<3>().cross(v41.template head<3>()); 
VERIFY_IS_APPROX(v40.cross3(v41), v42); VERIFY_IS_MUCH_SMALLER_THAN(v40.cross3(Vector4::Random()).dot(v40), Scalar(1)); // check mixed product typedef Matrix RealVector3; RealVector3 rv1 = RealVector3::Random(); VERIFY_IS_APPROX(v1.cross(rv1.template cast()), v1.cross(rv1)); VERIFY_IS_APPROX(rv1.template cast().cross(v1), rv1.cross(v1)); } template void orthomethods(int size=Size) { typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; typedef Matrix Matrix3N; typedef Matrix MatrixN3; typedef Matrix Vector3; VectorType v0 = VectorType::Random(size); // unitOrthogonal VERIFY_IS_MUCH_SMALLER_THAN(v0.unitOrthogonal().dot(v0), Scalar(1)); VERIFY_IS_APPROX(v0.unitOrthogonal().norm(), RealScalar(1)); if (size>=3) { v0.template head<2>().setZero(); v0.tail(size-2).setRandom(); VERIFY_IS_MUCH_SMALLER_THAN(v0.unitOrthogonal().dot(v0), Scalar(1)); VERIFY_IS_APPROX(v0.unitOrthogonal().norm(), RealScalar(1)); } // colwise/rowwise cross product Vector3 vec3 = Vector3::Random(); int i = internal::random(0,size-1); Matrix3N mat3N(3,size), mcross3N(3,size); mat3N.setRandom(); mcross3N = mat3N.colwise().cross(vec3); VERIFY_IS_APPROX(mcross3N.col(i), mat3N.col(i).cross(vec3)); MatrixN3 matN3(size,3), mcrossN3(size,3); matN3.setRandom(); mcrossN3 = matN3.rowwise().cross(vec3); VERIFY_IS_APPROX(mcrossN3.row(i), matN3.row(i).cross(vec3)); } EIGEN_DECLARE_TEST(geo_orthomethods) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( orthomethods_3() ); CALL_SUBTEST_2( orthomethods_3() ); CALL_SUBTEST_4( orthomethods_3 >() ); CALL_SUBTEST_1( (orthomethods()) ); CALL_SUBTEST_2( (orthomethods()) ); CALL_SUBTEST_1( (orthomethods()) ); CALL_SUBTEST_2( (orthomethods()) ); CALL_SUBTEST_3( (orthomethods()) ); CALL_SUBTEST_4( (orthomethods,8>()) ); CALL_SUBTEST_5( (orthomethods(36)) ); CALL_SUBTEST_6( (orthomethods(35)) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_parametrizedline.cpp 
================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include template void parametrizedline(const LineType& _line) { /* this test covers the following files: ParametrizedLine.h */ using std::abs; const Index dim = _line.dim(); typedef typename LineType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; typedef Hyperplane HyperplaneType; typedef Matrix MatrixType; VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); VectorType d0 = VectorType::Random(dim).normalized(); LineType l0(p0, d0); Scalar s0 = internal::random(); Scalar s1 = abs(internal::random()); VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(p0), RealScalar(1) ); VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(p0+s0*d0), RealScalar(1) ); VERIFY_IS_APPROX( (l0.projection(p1)-p1).norm(), l0.distance(p1) ); VERIFY_IS_MUCH_SMALLER_THAN( l0.distance(l0.projection(p1)), RealScalar(1) ); VERIFY_IS_APPROX( Scalar(l0.distance((p0+s0*d0) + d0.unitOrthogonal() * s1)), s1 ); // casting const int Dim = LineType::AmbientDimAtCompileTime; typedef typename GetDifferentType::type OtherScalar; ParametrizedLine hp1f = l0.template cast(); VERIFY_IS_APPROX(hp1f.template cast(),l0); ParametrizedLine hp1d = l0.template cast(); VERIFY_IS_APPROX(hp1d.template cast(),l0); // intersections VectorType p2 = VectorType::Random(dim); VectorType n2 = VectorType::Random(dim).normalized(); HyperplaneType hp(p2,n2); Scalar t = l0.intersectionParameter(hp); VectorType pi = l0.pointAt(t); VERIFY_IS_MUCH_SMALLER_THAN(hp.signedDistance(pi), RealScalar(1)); 
VERIFY_IS_MUCH_SMALLER_THAN(l0.distance(pi), RealScalar(1)); VERIFY_IS_APPROX(l0.intersectionPoint(hp), pi); // transform if (!NumTraits::IsComplex) { MatrixType rot = MatrixType::Random(dim,dim).householderQr().householderQ(); DiagonalMatrix scaling(VectorType::Random()); Translation translation(VectorType::Random()); while(scaling.diagonal().cwiseAbs().minCoeff() void parametrizedline_alignment() { typedef ParametrizedLine Line4a; typedef ParametrizedLine Line4u; EIGEN_ALIGN_MAX Scalar array1[16]; EIGEN_ALIGN_MAX Scalar array2[16]; EIGEN_ALIGN_MAX Scalar array3[16+1]; Scalar* array3u = array3+1; Line4a *p1 = ::new(reinterpret_cast(array1)) Line4a; Line4u *p2 = ::new(reinterpret_cast(array2)) Line4u; Line4u *p3 = ::new(reinterpret_cast(array3u)) Line4u; p1->origin().setRandom(); p1->direction().setRandom(); *p2 = *p1; *p3 = *p1; VERIFY_IS_APPROX(p1->origin(), p2->origin()); VERIFY_IS_APPROX(p1->origin(), p3->origin()); VERIFY_IS_APPROX(p1->direction(), p2->direction()); VERIFY_IS_APPROX(p1->direction(), p3->direction()); } EIGEN_DECLARE_TEST(geo_parametrizedline) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( parametrizedline(ParametrizedLine()) ); CALL_SUBTEST_2( parametrizedline(ParametrizedLine()) ); CALL_SUBTEST_2( parametrizedline_alignment() ); CALL_SUBTEST_3( parametrizedline(ParametrizedLine()) ); CALL_SUBTEST_3( parametrizedline_alignment() ); CALL_SUBTEST_4( parametrizedline(ParametrizedLine,5>()) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_quaternion.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2009 Mathieu Gautier // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include #include #include #include "AnnoyingScalar.h" template T bounded_acos(T v) { using std::acos; using std::min; using std::max; return acos((max)(T(-1),(min)(v,T(1)))); } template void check_slerp(const QuatType& q0, const QuatType& q1) { using std::abs; typedef typename QuatType::Scalar Scalar; typedef AngleAxis AA; Scalar largeEps = test_precision(); Scalar theta_tot = AA(q1*q0.inverse()).angle(); if(theta_tot>Scalar(EIGEN_PI)) theta_tot = Scalar(2.)*Scalar(EIGEN_PI)-theta_tot; for(Scalar t=0; t<=Scalar(1.001); t+=Scalar(0.1)) { QuatType q = q0.slerp(t,q1); Scalar theta = AA(q*q0.inverse()).angle(); VERIFY(abs(q.norm() - 1) < largeEps); if(theta_tot==0) VERIFY(theta_tot==0); else VERIFY(abs(theta - t * theta_tot) < largeEps); } } template void quaternion(void) { /* this test covers the following files: Quaternion.h */ using std::abs; typedef Matrix Vector3; typedef Matrix Matrix3; typedef Quaternion Quaternionx; typedef AngleAxis AngleAxisx; Scalar largeEps = test_precision(); if (internal::is_same::value) largeEps = Scalar(1e-3); Scalar eps = internal::random() * Scalar(1e-2); Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(), v2 = Vector3::Random(), v3 = Vector3::Random(); Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)), b = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); // Quaternion: Identity(), setIdentity(); Quaternionx q1, q2; q2.setIdentity(); VERIFY_IS_APPROX(Quaternionx(Quaternionx::Identity()).coeffs(), q2.coeffs()); q1.coeffs().setRandom(); VERIFY_IS_APPROX(q1.coeffs(), (q1*q2).coeffs()); #ifndef EIGEN_NO_IO // Printing std::ostringstream ss; ss << q2; VERIFY(ss.str() == "0i + 0j + 0k + 1"); #endif // concatenation q1 *= q2; q1 = AngleAxisx(a, v0.normalized()); q2 = AngleAxisx(a, v1.normalized()); // angular distance Scalar refangle = abs(AngleAxisx(q1.inverse()*q2).angle()); if (refangle>Scalar(EIGEN_PI)) refangle = Scalar(2)*Scalar(EIGEN_PI) - refangle; if((q1.coeffs()-q2.coeffs()).norm() > 
Scalar(10)*largeEps) { VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1)); } // rotation matrix conversion VERIFY_IS_APPROX(q1 * v2, q1.toRotationMatrix() * v2); VERIFY_IS_APPROX(q1 * q2 * v2, q1.toRotationMatrix() * q2.toRotationMatrix() * v2); VERIFY( (q2*q1).isApprox(q1*q2, largeEps) || !(q2 * q1 * v2).isApprox(q1.toRotationMatrix() * q2.toRotationMatrix() * v2)); q2 = q1.toRotationMatrix(); VERIFY_IS_APPROX(q1*v1,q2*v1); Matrix3 rot1(q1); VERIFY_IS_APPROX(q1*v1,rot1*v1); Quaternionx q3(rot1.transpose()*rot1); VERIFY_IS_APPROX(q3*v1,v1); // angle-axis conversion AngleAxisx aa = AngleAxisx(q1); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); // Do not execute the test if the rotation angle is almost zero, or // the rotation axis and v1 are almost parallel. if (abs(aa.angle()) > Scalar(5)*test_precision() && (aa.axis() - v1.normalized()).norm() < Scalar(1.99) && (aa.axis() + v1.normalized()).norm() < Scalar(1.99)) { VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1); } // from two vector creation VERIFY_IS_APPROX( v2.normalized(),(q2.setFromTwoVectors(v1, v2)*v1).normalized()); VERIFY_IS_APPROX( v1.normalized(),(q2.setFromTwoVectors(v1, v1)*v1).normalized()); VERIFY_IS_APPROX(-v1.normalized(),(q2.setFromTwoVectors(v1,-v1)*v1).normalized()); if (internal::is_same::value) { v3 = (v1.array()+eps).matrix(); VERIFY_IS_APPROX( v3.normalized(),(q2.setFromTwoVectors(v1, v3)*v1).normalized()); VERIFY_IS_APPROX(-v3.normalized(),(q2.setFromTwoVectors(v1,-v3)*v1).normalized()); } // from two vector creation static function VERIFY_IS_APPROX( v2.normalized(),(Quaternionx::FromTwoVectors(v1, v2)*v1).normalized()); VERIFY_IS_APPROX( v1.normalized(),(Quaternionx::FromTwoVectors(v1, v1)*v1).normalized()); VERIFY_IS_APPROX(-v1.normalized(),(Quaternionx::FromTwoVectors(v1,-v1)*v1).normalized()); if (internal::is_same::value) { v3 = (v1.array()+eps).matrix(); VERIFY_IS_APPROX( 
v3.normalized(),(Quaternionx::FromTwoVectors(v1, v3)*v1).normalized()); VERIFY_IS_APPROX(-v3.normalized(),(Quaternionx::FromTwoVectors(v1,-v3)*v1).normalized()); } // inverse and conjugate VERIFY_IS_APPROX(q1 * (q1.inverse() * v1), v1); VERIFY_IS_APPROX(q1 * (q1.conjugate() * v1), v1); // test casting Quaternion q1f = q1.template cast(); VERIFY_IS_APPROX(q1f.template cast(),q1); Quaternion q1d = q1.template cast(); VERIFY_IS_APPROX(q1d.template cast(),q1); // test bug 369 - improper alignment. Quaternionx *q = new Quaternionx; delete q; q1 = Quaternionx::UnitRandom(); q2 = Quaternionx::UnitRandom(); check_slerp(q1,q2); q1 = AngleAxisx(b, v1.normalized()); q2 = AngleAxisx(b+Scalar(EIGEN_PI), v1.normalized()); check_slerp(q1,q2); q1 = AngleAxisx(b, v1.normalized()); q2 = AngleAxisx(-b, -v1.normalized()); check_slerp(q1,q2); q1 = Quaternionx::UnitRandom(); q2.coeffs() = -q1.coeffs(); check_slerp(q1,q2); } template void mapQuaternion(void){ typedef Map, Aligned> MQuaternionA; typedef Map, Aligned> MCQuaternionA; typedef Map > MQuaternionUA; typedef Map > MCQuaternionUA; typedef Quaternion Quaternionx; typedef Matrix Vector3; typedef AngleAxis AngleAxisx; Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(); Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); EIGEN_ALIGN_MAX Scalar array1[4]; EIGEN_ALIGN_MAX Scalar array2[4]; EIGEN_ALIGN_MAX Scalar array3[4+1]; Scalar* array3unaligned = array3+1; MQuaternionA mq1(array1); MCQuaternionA mcq1(array1); MQuaternionA mq2(array2); MQuaternionUA mq3(array3unaligned); MCQuaternionUA mcq3(array3unaligned); // std::cerr << array1 << " " << array2 << " " << array3 << "\n"; mq1 = AngleAxisx(a, v0.normalized()); mq2 = mq1; mq3 = mq1; Quaternionx q1 = mq1; Quaternionx q2 = mq2; Quaternionx q3 = mq3; Quaternionx q4 = MCQuaternionUA(array3unaligned); VERIFY_IS_APPROX(q1.coeffs(), q2.coeffs()); VERIFY_IS_APPROX(q1.coeffs(), q3.coeffs()); VERIFY_IS_APPROX(q4.coeffs(), q3.coeffs()); VERIFY_IS_APPROX(mq1 * (mq1.inverse() * 
v1), v1); VERIFY_IS_APPROX(mq1 * (mq1.conjugate() * v1), v1); VERIFY_IS_APPROX(mcq1 * (mcq1.inverse() * v1), v1); VERIFY_IS_APPROX(mcq1 * (mcq1.conjugate() * v1), v1); VERIFY_IS_APPROX(mq3 * (mq3.inverse() * v1), v1); VERIFY_IS_APPROX(mq3 * (mq3.conjugate() * v1), v1); VERIFY_IS_APPROX(mcq3 * (mcq3.inverse() * v1), v1); VERIFY_IS_APPROX(mcq3 * (mcq3.conjugate() * v1), v1); VERIFY_IS_APPROX(mq1*mq2, q1*q2); VERIFY_IS_APPROX(mq3*mq2, q3*q2); VERIFY_IS_APPROX(mcq1*mq2, q1*q2); VERIFY_IS_APPROX(mcq3*mq2, q3*q2); // Bug 1461, compilation issue with Map::w(), and other reference/constness checks: VERIFY_IS_APPROX(mcq3.coeffs().x() + mcq3.coeffs().y() + mcq3.coeffs().z() + mcq3.coeffs().w(), mcq3.coeffs().sum()); VERIFY_IS_APPROX(mcq3.x() + mcq3.y() + mcq3.z() + mcq3.w(), mcq3.coeffs().sum()); mq3.w() = 1; const Quaternionx& cq3(q3); VERIFY( &cq3.x() == &q3.x() ); const MQuaternionUA& cmq3(mq3); VERIFY( &cmq3.x() == &mq3.x() ); // FIXME the following should be ok. The problem is that currently the LValueBit flag // is used to determine whether we can return a coeff by reference or not, which is not enough for Map. 
//const MCQuaternionUA& cmcq3(mcq3); //VERIFY( &cmcq3.x() == &mcq3.x() ); // test cast { Quaternion q1f = mq1.template cast(); VERIFY_IS_APPROX(q1f.template cast(),mq1); Quaternion q1d = mq1.template cast(); VERIFY_IS_APPROX(q1d.template cast(),mq1); } } template void quaternionAlignment(void){ typedef Quaternion QuaternionA; typedef Quaternion QuaternionUA; EIGEN_ALIGN_MAX Scalar array1[4]; EIGEN_ALIGN_MAX Scalar array2[4]; EIGEN_ALIGN_MAX Scalar array3[4+1]; Scalar* arrayunaligned = array3+1; QuaternionA *q1 = ::new(reinterpret_cast(array1)) QuaternionA; QuaternionUA *q2 = ::new(reinterpret_cast(array2)) QuaternionUA; QuaternionUA *q3 = ::new(reinterpret_cast(arrayunaligned)) QuaternionUA; q1->coeffs().setRandom(); *q2 = *q1; *q3 = *q1; VERIFY_IS_APPROX(q1->coeffs(), q2->coeffs()); VERIFY_IS_APPROX(q1->coeffs(), q3->coeffs()); } template void check_const_correctness(const PlainObjectType&) { // there's a lot that we can't test here while still having this test compile! // the only possible approach would be to run a script trying to compile stuff and checking that it fails. // CMake can help with that. // verify that map-to-const don't have LvalueBit typedef typename internal::add_const::type ConstPlainObjectType; VERIFY( !(internal::traits >::Flags & LvalueBit) ); VERIFY( !(internal::traits >::Flags & LvalueBit) ); VERIFY( !(Map::Flags & LvalueBit) ); VERIFY( !(Map::Flags & LvalueBit) ); } #if EIGEN_HAS_RVALUE_REFERENCES // Regression for bug 1573 struct MovableClass { // The following line is a workaround for gcc 4.7 and 4.8 (see bug 1573 comments). 
static_assert(std::is_nothrow_move_constructible::value,""); MovableClass() = default; MovableClass(const MovableClass&) = default; MovableClass(MovableClass&&) noexcept = default; MovableClass& operator=(const MovableClass&) = default; MovableClass& operator=(MovableClass&&) = default; Quaternionf m_quat; }; #endif EIGEN_DECLARE_TEST(geo_quaternion) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( quaternion() )); CALL_SUBTEST_1( check_const_correctness(Quaternionf()) ); CALL_SUBTEST_1(( quaternion() )); CALL_SUBTEST_1(( quaternionAlignment() )); CALL_SUBTEST_1( mapQuaternion() ); CALL_SUBTEST_2(( quaternion() )); CALL_SUBTEST_2( check_const_correctness(Quaterniond()) ); CALL_SUBTEST_2(( quaternion() )); CALL_SUBTEST_2(( quaternionAlignment() )); CALL_SUBTEST_2( mapQuaternion() ); #ifndef EIGEN_TEST_ANNOYING_SCALAR_DONT_THROW AnnoyingScalar::dont_throw = true; #endif CALL_SUBTEST_3(( quaternion() )); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/geo_transformations.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include template Matrix angleToVec(T a) { return Matrix(std::cos(a), std::sin(a)); } // This permits to workaround a bug in clang/llvm code generation. 
template EIGEN_DONT_INLINE void dont_over_optimize(T& x) { volatile typename T::Scalar tmp = x(0); x(0) = tmp; } template void non_projective_only() { /* this test covers the following files: Cross.h Quaternion.h, Transform.cpp */ typedef Matrix Vector3; typedef Quaternion Quaternionx; typedef AngleAxis AngleAxisx; typedef Transform Transform3; typedef DiagonalMatrix AlignedScaling3; typedef Translation Translation3; Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(); Transform3 t0, t1, t2; Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); Quaternionx q1, q2; q1 = AngleAxisx(a, v0.normalized()); t0 = Transform3::Identity(); VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); t0.linear() = q1.toRotationMatrix(); v0 << 50, 2, 1; t0.scale(v0); VERIFY_IS_APPROX( (t0 * Vector3(1,0,0)).template head<3>().norm(), v0.x()); t0.setIdentity(); t1.setIdentity(); v1 << 1, 2, 3; t0.linear() = q1.toRotationMatrix(); t0.pretranslate(v0); t0.scale(v1); t1.linear() = q1.conjugate().toRotationMatrix(); t1.prescale(v1.cwiseInverse()); t1.translate(-v0); VERIFY((t0 * t1).matrix().isIdentity(test_precision())); t1.fromPositionOrientationScale(v0, q1, v1); VERIFY_IS_APPROX(t1.matrix(), t0.matrix()); VERIFY_IS_APPROX(t1*v1, t0*v1); // translation * vector t0.setIdentity(); t0.translate(v0); VERIFY_IS_APPROX((t0 * v1).template head<3>(), Translation3(v0) * v1); // AlignedScaling * vector t0.setIdentity(); t0.scale(v0); VERIFY_IS_APPROX((t0 * v1).template head<3>(), AlignedScaling3(v0) * v1); } template void transformations() { /* this test covers the following files: Cross.h Quaternion.h, Transform.cpp */ using std::cos; using std::abs; typedef Matrix Matrix3; typedef Matrix Matrix4; typedef Matrix Vector2; typedef Matrix Vector3; typedef Matrix Vector4; typedef Quaternion Quaternionx; typedef AngleAxis AngleAxisx; typedef Transform Transform2; typedef Transform Transform3; typedef typename Transform3::MatrixType MatrixType; typedef DiagonalMatrix 
AlignedScaling3; typedef Translation Translation2; typedef Translation Translation3; Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(); Matrix3 matrot1, m; Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); Scalar s0 = internal::random(), s1 = internal::random(); while(v0.norm() < test_precision()) v0 = Vector3::Random(); while(v1.norm() < test_precision()) v1 = Vector3::Random(); VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0); VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(EIGEN_PI), v0.unitOrthogonal()) * v0); if(abs(cos(a)) > test_precision()) { VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); } m = AngleAxisx(a, v0.normalized()).toRotationMatrix().adjoint(); VERIFY_IS_APPROX(Matrix3::Identity(), m * AngleAxisx(a, v0.normalized())); VERIFY_IS_APPROX(Matrix3::Identity(), AngleAxisx(a, v0.normalized()) * m); Quaternionx q1, q2; q1 = AngleAxisx(a, v0.normalized()); q2 = AngleAxisx(a, v1.normalized()); // rotation matrix conversion matrot1 = AngleAxisx(Scalar(0.1), Vector3::UnitX()) * AngleAxisx(Scalar(0.2), Vector3::UnitY()) * AngleAxisx(Scalar(0.3), Vector3::UnitZ()); VERIFY_IS_APPROX(matrot1 * v1, AngleAxisx(Scalar(0.1), Vector3(1,0,0)).toRotationMatrix() * (AngleAxisx(Scalar(0.2), Vector3(0,1,0)).toRotationMatrix() * (AngleAxisx(Scalar(0.3), Vector3(0,0,1)).toRotationMatrix() * v1))); // angle-axis conversion AngleAxisx aa = AngleAxisx(q1); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); // The following test is stable only if 2*angle != angle and v1 is not colinear with axis if( (abs(aa.angle()) > test_precision()) && (abs(aa.axis().dot(v1.normalized()))<(Scalar(1)-Scalar(4)*test_precision())) ) { VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) ); } aa.fromRotationMatrix(aa.toRotationMatrix()); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); // The following test is stable only if 2*angle != angle and v1 is not colinear with axis if( (abs(aa.angle()) > 
test_precision()) && (abs(aa.axis().dot(v1.normalized()))<(Scalar(1)-Scalar(4)*test_precision())) ) { VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) ); } // AngleAxis VERIFY_IS_APPROX(AngleAxisx(a,v1.normalized()).toRotationMatrix(), Quaternionx(AngleAxisx(a,v1.normalized())).toRotationMatrix()); AngleAxisx aa1; m = q1.toRotationMatrix(); aa1 = m; VERIFY_IS_APPROX(AngleAxisx(m).toRotationMatrix(), Quaternionx(m).toRotationMatrix()); // Transform // TODO complete the tests ! a = 0; while (abs(a)(-Scalar(0.4)*Scalar(EIGEN_PI), Scalar(0.4)*Scalar(EIGEN_PI)); q1 = AngleAxisx(a, v0.normalized()); Transform3 t0, t1, t2; // first test setIdentity() and Identity() t0.setIdentity(); VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); t0.matrix().setZero(); t0 = Transform3::Identity(); VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); t0.setIdentity(); t1.setIdentity(); v1 << 1, 2, 3; t0.linear() = q1.toRotationMatrix(); t0.pretranslate(v0); t0.scale(v1); t1.linear() = q1.conjugate().toRotationMatrix(); t1.prescale(v1.cwiseInverse()); t1.translate(-v0); VERIFY((t0 * t1).matrix().isIdentity(test_precision())); t1.fromPositionOrientationScale(v0, q1, v1); VERIFY_IS_APPROX(t1.matrix(), t0.matrix()); t0.setIdentity(); t0.scale(v0).rotate(q1.toRotationMatrix()); t1.setIdentity(); t1.scale(v0).rotate(q1); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.scale(v0).rotate(AngleAxisx(q1)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); VERIFY_IS_APPROX(t0.scale(a).matrix(), t1.scale(Vector3::Constant(a)).matrix()); VERIFY_IS_APPROX(t0.prescale(a).matrix(), t1.prescale(Vector3::Constant(a)).matrix()); // More transform constructors, operator=, operator*= Matrix3 mat3 = Matrix3::Random(); Matrix4 mat4; mat4 << mat3 , Vector3::Zero() , Vector4::Zero().transpose(); Transform3 tmat3(mat3), tmat4(mat4); if(Mode!=int(AffineCompact)) tmat4.matrix()(3,3) = Scalar(1); VERIFY_IS_APPROX(tmat3.matrix(), 
tmat4.matrix()); Scalar a3 = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); Vector3 v3 = Vector3::Random().normalized(); AngleAxisx aa3(a3, v3); Transform3 t3(aa3); Transform3 t4; t4 = aa3; VERIFY_IS_APPROX(t3.matrix(), t4.matrix()); t4.rotate(AngleAxisx(-a3,v3)); VERIFY_IS_APPROX(t4.matrix(), MatrixType::Identity()); t4 *= aa3; VERIFY_IS_APPROX(t3.matrix(), t4.matrix()); do { v3 = Vector3::Random(); dont_over_optimize(v3); } while (v3.cwiseAbs().minCoeff()::epsilon()); Translation3 tv3(v3); Transform3 t5(tv3); t4 = tv3; VERIFY_IS_APPROX(t5.matrix(), t4.matrix()); t4.translate((-v3).eval()); VERIFY_IS_APPROX(t4.matrix(), MatrixType::Identity()); t4 *= tv3; VERIFY_IS_APPROX(t5.matrix(), t4.matrix()); AlignedScaling3 sv3(v3); Transform3 t6(sv3); t4 = sv3; VERIFY_IS_APPROX(t6.matrix(), t4.matrix()); t4.scale(v3.cwiseInverse()); VERIFY_IS_APPROX(t4.matrix(), MatrixType::Identity()); t4 *= sv3; VERIFY_IS_APPROX(t6.matrix(), t4.matrix()); // matrix * transform VERIFY_IS_APPROX((t3.matrix()*t4).matrix(), (t3*t4).matrix()); // chained Transform product VERIFY_IS_APPROX(((t3*t4)*t5).matrix(), (t3*(t4*t5)).matrix()); // check that Transform product doesn't have aliasing problems t5 = t4; t5 = t5*t5; VERIFY_IS_APPROX(t5, t4*t4); // 2D transformation Transform2 t20, t21; Vector2 v20 = Vector2::Random(); Vector2 v21 = Vector2::Random(); for (int k=0; k<2; ++k) if (abs(v21[k])(a).toRotationMatrix(); VERIFY_IS_APPROX(t20.fromPositionOrientationScale(v20,a,v21).matrix(), t21.pretranslate(v20).scale(v21).matrix()); t21.setIdentity(); t21.linear() = Rotation2D(-a).toRotationMatrix(); VERIFY( (t20.fromPositionOrientationScale(v20,a,v21) * (t21.prescale(v21.cwiseInverse()).translate(-v20))).matrix().isIdentity(test_precision()) ); // Transform - new API // 3D t0.setIdentity(); t0.rotate(q1).scale(v0).translate(v0); // mat * aligned scaling and mat * translation t1 = (Matrix3(q1) * AlignedScaling3(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t1 = 
(Matrix3(q1) * Eigen::Scaling(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t1 = (q1 * Eigen::Scaling(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // mat * transformation and aligned scaling * translation t1 = Matrix3(q1) * (AlignedScaling3(v0) * Translation3(v0)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.scale(s0).translate(v0); t1 = Eigen::Scaling(s0) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.prescale(s0); t1 = Eigen::Scaling(s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0 = t3; t0.scale(s0); t1 = t3 * Eigen::Scaling(s0,s0,s0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.prescale(s0); t1 = Eigen::Scaling(s0,s0,s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0 = t3; t0.scale(s0); t1 = t3 * Eigen::Scaling(s0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.prescale(s0); t1 = Eigen::Scaling(s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.prerotate(q1).prescale(v0).pretranslate(v0); // translation * aligned scaling and transformation * mat t1 = (Translation3(v0) * AlignedScaling3(v0)) * Transform3(q1); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // scaling * mat and translation * mat t1 = Translation3(v0) * (AlignedScaling3(v0) * Transform3(q1)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.scale(v0).translate(v0).rotate(q1); // translation * mat and aligned scaling * transformation t1 = AlignedScaling3(v0) * (Translation3(v0) * Transform3(q1)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // transformation * aligned scaling t0.scale(v0); t1 *= AlignedScaling3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t1 = AlignedScaling3(v0) * (Translation3(v0) * Transform3(q1)); t1 = t1 * v0.asDiagonal(); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // transformation * translation t0.translate(v0); t1 = t1 * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // translation * transformation 
t0.pretranslate(v0); t1 = Translation3(v0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // transform * quaternion t0.rotate(q1); t1 = t1 * q1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // translation * quaternion t0.translate(v1).rotate(q1); t1 = t1 * (Translation3(v1) * q1); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // aligned scaling * quaternion t0.scale(v1).rotate(q1); t1 = t1 * (AlignedScaling3(v1) * q1); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // quaternion * transform t0.prerotate(q1); t1 = q1 * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // quaternion * translation t0.rotate(q1).translate(v1); t1 = t1 * (q1 * Translation3(v1)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // quaternion * aligned scaling t0.rotate(q1).scale(v1); t1 = t1 * (q1 * AlignedScaling3(v1)); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // test transform inversion t0.setIdentity(); t0.translate(v0); do { t0.linear().setRandom(); } while(t0.linear().jacobiSvd().singularValues()(2)()); Matrix4 t044 = Matrix4::Zero(); t044(3,3) = 1; t044.block(0,0,t0.matrix().rows(),4) = t0.matrix(); VERIFY_IS_APPROX(t0.inverse(Affine).matrix(), t044.inverse().block(0,0,t0.matrix().rows(),4)); t0.setIdentity(); t0.translate(v0).rotate(q1); t044 = Matrix4::Zero(); t044(3,3) = 1; t044.block(0,0,t0.matrix().rows(),4) = t0.matrix(); VERIFY_IS_APPROX(t0.inverse(Isometry).matrix(), t044.inverse().block(0,0,t0.matrix().rows(),4)); Matrix3 mat_rotation, mat_scaling; t0.setIdentity(); t0.translate(v0).rotate(q1).scale(v1); t0.computeRotationScaling(&mat_rotation, &mat_scaling); VERIFY_IS_APPROX(t0.linear(), mat_rotation * mat_scaling); VERIFY_IS_APPROX(mat_rotation*mat_rotation.adjoint(), Matrix3::Identity()); VERIFY_IS_APPROX(mat_rotation.determinant(), Scalar(1)); t0.computeScalingRotation(&mat_scaling, &mat_rotation); VERIFY_IS_APPROX(t0.linear(), mat_scaling * mat_rotation); VERIFY_IS_APPROX(mat_rotation*mat_rotation.adjoint(), Matrix3::Identity()); VERIFY_IS_APPROX(mat_rotation.determinant(), 
Scalar(1)); // test casting Transform t1f = t1.template cast(); VERIFY_IS_APPROX(t1f.template cast(),t1); Transform t1d = t1.template cast(); VERIFY_IS_APPROX(t1d.template cast(),t1); Translation3 tr1(v0); Translation tr1f = tr1.template cast(); VERIFY_IS_APPROX(tr1f.template cast(),tr1); Translation tr1d = tr1.template cast(); VERIFY_IS_APPROX(tr1d.template cast(),tr1); AngleAxis aa1f = aa1.template cast(); VERIFY_IS_APPROX(aa1f.template cast(),aa1); AngleAxis aa1d = aa1.template cast(); VERIFY_IS_APPROX(aa1d.template cast(),aa1); Rotation2D r2d1(internal::random()); Rotation2D r2d1f = r2d1.template cast(); VERIFY_IS_APPROX(r2d1f.template cast(),r2d1); Rotation2D r2d1d = r2d1.template cast(); VERIFY_IS_APPROX(r2d1d.template cast(),r2d1); for(int k=0; k<100; ++k) { Scalar angle = internal::random(-100,100); Rotation2D rot2(angle); VERIFY( rot2.smallestPositiveAngle() >= 0 ); VERIFY( rot2.smallestPositiveAngle() <= Scalar(2)*Scalar(EIGEN_PI) ); VERIFY_IS_APPROX( angleToVec(rot2.smallestPositiveAngle()), angleToVec(rot2.angle()) ); VERIFY( rot2.smallestAngle() >= -Scalar(EIGEN_PI) ); VERIFY( rot2.smallestAngle() <= Scalar(EIGEN_PI) ); VERIFY_IS_APPROX( angleToVec(rot2.smallestAngle()), angleToVec(rot2.angle()) ); Matrix rot2_as_mat(rot2); Rotation2D rot3(rot2_as_mat); VERIFY_IS_APPROX( angleToVec(rot2.smallestAngle()), angleToVec(rot3.angle()) ); } s0 = internal::random(-100,100); s1 = internal::random(-100,100); Rotation2D R0(s0), R1(s1); t20 = Translation2(v20) * (R0 * Eigen::Scaling(s0)); t21 = Translation2(v20) * R0 * Eigen::Scaling(s0); VERIFY_IS_APPROX(t20,t21); t20 = Translation2(v20) * (R0 * R0.inverse() * Eigen::Scaling(s0)); t21 = Translation2(v20) * Eigen::Scaling(s0); VERIFY_IS_APPROX(t20,t21); VERIFY_IS_APPROX(s0, (R0.slerp(0, R1)).angle()); VERIFY_IS_APPROX( angleToVec(R1.smallestPositiveAngle()), angleToVec((R0.slerp(1, R1)).smallestPositiveAngle()) ); VERIFY_IS_APPROX(R0.smallestPositiveAngle(), (R0.slerp(0.5, R0)).smallestPositiveAngle()); 
if(std::cos(s0)>0) VERIFY_IS_MUCH_SMALLER_THAN((R0.slerp(0.5, R0.inverse())).smallestAngle(), Scalar(1)); else VERIFY_IS_APPROX(Scalar(EIGEN_PI), (R0.slerp(0.5, R0.inverse())).smallestPositiveAngle()); // Check path length Scalar l = 0; int path_steps = 100; for(int k=0; k::epsilon()*Scalar(path_steps/2))); // check basic features { Rotation2D r1; // default ctor r1 = Rotation2D(s0); // copy assignment VERIFY_IS_APPROX(r1.angle(),s0); Rotation2D r2(r1); // copy ctor VERIFY_IS_APPROX(r2.angle(),s0); } { Transform3 t32(Matrix4::Random()), t33, t34; t34 = t33 = t32; t32.scale(v0); t33*=AlignedScaling3(v0); VERIFY_IS_APPROX(t32.matrix(), t33.matrix()); t33 = t34 * AlignedScaling3(v0); VERIFY_IS_APPROX(t32.matrix(), t33.matrix()); } } template void transform_associativity_left(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h) { VERIFY_IS_APPROX( q*(a1*v), (q*a1)*v ); VERIFY_IS_APPROX( q*(a2*v), (q*a2)*v ); VERIFY_IS_APPROX( q*(p*h).hnormalized(), ((q*p)*h).hnormalized() ); } template void transform_associativity2(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h) { VERIFY_IS_APPROX( a1*(q*v), (a1*q)*v ); VERIFY_IS_APPROX( a2*(q*v), (a2*q)*v ); VERIFY_IS_APPROX( p *(q*v).homogeneous(), (p *q)*v.homogeneous() ); transform_associativity_left(a1, a2,p, q, v, h); } template void transform_associativity(const RotationType& R) { typedef Matrix VectorType; typedef Matrix HVectorType; typedef Matrix LinearType; typedef Matrix MatrixType; typedef Transform AffineCompactType; typedef Transform AffineType; typedef Transform ProjectiveType; typedef DiagonalMatrix ScalingType; typedef Translation TranslationType; AffineCompactType A1c; A1c.matrix().setRandom(); AffineCompactType A2c; A2c.matrix().setRandom(); AffineType A1(A1c); AffineType A2(A2c); ProjectiveType P1; P1.matrix().setRandom(); VectorType v1 = VectorType::Random(); VectorType v2 = VectorType::Random(); HVectorType h1 = HVectorType::Random(); Scalar s1 = 
internal::random(); LinearType L = LinearType::Random(); MatrixType M = MatrixType::Random(); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2, v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2c, v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, v1.asDiagonal(), v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, ScalingType(v1), v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(v1), v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(s1), v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, TranslationType(v1), v2, h1) ); CALL_SUBTEST( transform_associativity_left(A1c, A1, P1, L, v2, h1) ); CALL_SUBTEST( transform_associativity2(A1c, A1, P1, R, v2, h1) ); VERIFY_IS_APPROX( A1*(M*h1), (A1*M)*h1 ); VERIFY_IS_APPROX( A1c*(M*h1), (A1c*M)*h1 ); VERIFY_IS_APPROX( P1*(M*h1), (P1*M)*h1 ); VERIFY_IS_APPROX( M*(A1*h1), (M*A1)*h1 ); VERIFY_IS_APPROX( M*(A1c*h1), (M*A1c)*h1 ); VERIFY_IS_APPROX( M*(P1*h1), ((M*P1)*h1) ); } template void transform_alignment() { typedef Transform Projective3a; typedef Transform Projective3u; EIGEN_ALIGN_MAX Scalar array1[16]; EIGEN_ALIGN_MAX Scalar array2[16]; EIGEN_ALIGN_MAX Scalar array3[16+1]; Scalar* array3u = array3+1; Projective3a *p1 = ::new(reinterpret_cast(array1)) Projective3a; Projective3u *p2 = ::new(reinterpret_cast(array2)) Projective3u; Projective3u *p3 = ::new(reinterpret_cast(array3u)) Projective3u; p1->matrix().setRandom(); *p2 = *p1; *p3 = *p1; VERIFY_IS_APPROX(p1->matrix(), p2->matrix()); VERIFY_IS_APPROX(p1->matrix(), p3->matrix()); VERIFY_IS_APPROX( (*p1) * (*p1), (*p2)*(*p3)); } template void transform_products() { typedef Matrix Mat; typedef Transform Proj; typedef Transform Aff; typedef Transform AffC; Proj p; p.matrix().setRandom(); Aff a; a.linear().setRandom(); a.translation().setRandom(); AffC ac = a; Mat p_m(p.matrix()), a_m(a.matrix()); VERIFY_IS_APPROX((p*p).matrix(), p_m*p_m); VERIFY_IS_APPROX((a*a).matrix(), a_m*a_m); 
VERIFY_IS_APPROX((p*a).matrix(), p_m*a_m); VERIFY_IS_APPROX((a*p).matrix(), a_m*p_m); VERIFY_IS_APPROX((ac*a).matrix(), a_m*a_m); VERIFY_IS_APPROX((a*ac).matrix(), a_m*a_m); VERIFY_IS_APPROX((p*ac).matrix(), p_m*a_m); VERIFY_IS_APPROX((ac*p).matrix(), a_m*p_m); } template void transformations_no_scale() { /* this test covers the following files: Cross.h Quaternion.h, Transform.h */ typedef Matrix Vector3; typedef Matrix Vector4; typedef Quaternion Quaternionx; typedef AngleAxis AngleAxisx; typedef Transform Transform3; typedef Translation Translation3; typedef Matrix Matrix4; Vector3 v0 = Vector3::Random(), v1 = Vector3::Random(); Transform3 t0, t1, t2; Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); Quaternionx q1, q2; q1 = AngleAxisx(a, v0.normalized()); t0 = Transform3::Identity(); VERIFY_IS_APPROX(t0.matrix(), Transform3::MatrixType::Identity()); t0.setIdentity(); t1.setIdentity(); v1 = Vector3::Ones(); t0.linear() = q1.toRotationMatrix(); t0.pretranslate(v0); t1.linear() = q1.conjugate().toRotationMatrix(); t1.translate(-v0); VERIFY((t0 * t1).matrix().isIdentity(test_precision())); t1.fromPositionOrientationScale(v0, q1, v1); VERIFY_IS_APPROX(t1.matrix(), t0.matrix()); VERIFY_IS_APPROX(t1*v1, t0*v1); // translation * vector t0.setIdentity(); t0.translate(v0); VERIFY_IS_APPROX((t0 * v1).template head<3>(), Translation3(v0) * v1); // Conversion to matrix. Transform3 t3; t3.linear() = q1.toRotationMatrix(); t3.translation() = v1; Matrix4 m3 = t3.matrix(); VERIFY((m3 * m3.inverse()).isIdentity(test_precision())); // Verify implicit last row is initialized. VERIFY_IS_APPROX(Vector4(m3.row(3)), Vector4(0.0, 0.0, 0.0, 1.0)); VERIFY_IS_APPROX(t3.rotation(), t3.linear()); if(Mode==Isometry) VERIFY(t3.rotation().data()==t3.linear().data()); } template void transformations_computed_scaling_continuity() { typedef Matrix Vector3; typedef Transform Transform3; typedef Matrix Matrix3; // Given: two transforms that differ by '2*eps'. 
Scalar eps(1e-3); Vector3 v0 = Vector3::Random().normalized(), v1 = Vector3::Random().normalized(), v3 = Vector3::Random().normalized(); Transform3 t0, t1; // The interesting case is when their determinants have different signs. Matrix3 rank2 = 50 * v0 * v0.adjoint() + 20 * v1 * v1.adjoint(); t0.linear() = rank2 + eps * v3 * v3.adjoint(); t1.linear() = rank2 - eps * v3 * v3.adjoint(); // When: computing the rotation-scaling parts Matrix3 r0, s0, r1, s1; t0.computeRotationScaling(&r0, &s0); t1.computeRotationScaling(&r1, &s1); // Then: the scaling parts should differ by no more than '2*eps'. const Scalar c(2.1); // 2 + room for rounding errors VERIFY((s0 - s1).norm() < c * eps); } EIGEN_DECLARE_TEST(geo_transformations) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( transformations() )); CALL_SUBTEST_1(( non_projective_only() )); CALL_SUBTEST_1(( transformations_computed_scaling_continuity() )); CALL_SUBTEST_2(( transformations() )); CALL_SUBTEST_2(( non_projective_only() )); CALL_SUBTEST_2(( transform_alignment() )); CALL_SUBTEST_3(( transformations() )); CALL_SUBTEST_3(( transformations() )); CALL_SUBTEST_3(( transform_alignment() )); CALL_SUBTEST_4(( transformations() )); CALL_SUBTEST_4(( non_projective_only() )); CALL_SUBTEST_5(( transformations() )); CALL_SUBTEST_5(( non_projective_only() )); CALL_SUBTEST_6(( transformations() )); CALL_SUBTEST_6(( transformations() )); CALL_SUBTEST_7(( transform_products() )); CALL_SUBTEST_7(( transform_products() )); CALL_SUBTEST_8(( transform_associativity(Rotation2D(internal::random()*double(EIGEN_PI))) )); CALL_SUBTEST_8(( transform_associativity(Quaterniond::UnitRandom()) )); CALL_SUBTEST_9(( transformations_no_scale() )); CALL_SUBTEST_9(( transformations_no_scale() )); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/gpu_basic.cu ================================================ // This file is part of Eigen, a lightweight C++ template library // for 
linear algebra. // // Copyright (C) 2015-2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // workaround issue between gcc >= 4.7 and cuda 5.5 #if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) #undef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_USE_INT128 #endif #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #include "main.h" #include "gpu_common.h" // Check that dense modules can be properly parsed by nvcc #include // struct Foo{ // EIGEN_DEVICE_FUNC // void operator()(int i, const float* mats, float* vecs) const { // using namespace Eigen; // // Matrix3f M(data); // // Vector3f x(data+9); // // Map(data+9) = M.inverse() * x; // Matrix3f M(mats+i/16); // Vector3f x(vecs+i*3); // // using std::min; // // using std::sqrt; // Map(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x(); // //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum(); // } // }; template struct coeff_wise { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T x1(in+i); T x2(in+i+1); T x3(in+i+2); Map res(out+i*T::MaxSizeAtCompileTime); res.array() += (in[0] * x1 + x2).array() * x3.array(); } }; template struct complex_sqrt { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef typename T::Scalar ComplexType; typedef typename T::Scalar::value_type ValueType; const int num_special_inputs = 18; if (i == 0) { const ValueType nan = std::numeric_limits::quiet_NaN(); typedef Eigen::Vector SpecialInputs; SpecialInputs special_in; special_in.setZero(); int idx = 0; special_in[idx++] = ComplexType(0, 0); special_in[idx++] = ComplexType(-0, 0); special_in[idx++] = ComplexType(0, -0); special_in[idx++] = ComplexType(-0, -0); 
// GCC's fallback sqrt implementation fails for inf inputs. // It is called when _GLIBCXX_USE_C99_COMPLEX is false or if // clang includes the GCC header (which temporarily disables // _GLIBCXX_USE_C99_COMPLEX) #if !defined(_GLIBCXX_COMPLEX) || \ (_GLIBCXX_USE_C99_COMPLEX && !defined(__CLANG_CUDA_WRAPPERS_COMPLEX)) const ValueType inf = std::numeric_limits::infinity(); special_in[idx++] = ComplexType(1.0, inf); special_in[idx++] = ComplexType(nan, inf); special_in[idx++] = ComplexType(1.0, -inf); special_in[idx++] = ComplexType(nan, -inf); special_in[idx++] = ComplexType(-inf, 1.0); special_in[idx++] = ComplexType(inf, 1.0); special_in[idx++] = ComplexType(-inf, -1.0); special_in[idx++] = ComplexType(inf, -1.0); special_in[idx++] = ComplexType(-inf, nan); special_in[idx++] = ComplexType(inf, nan); #endif special_in[idx++] = ComplexType(1.0, nan); special_in[idx++] = ComplexType(nan, 1.0); special_in[idx++] = ComplexType(nan, -1.0); special_in[idx++] = ComplexType(nan, nan); Map special_out(out); special_out = special_in.cwiseSqrt(); } T x1(in + i); Map res(out + num_special_inputs + i*T::MaxSizeAtCompileTime); res = x1.cwiseSqrt(); } }; template struct complex_operators { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef typename T::Scalar ComplexType; typedef typename T::Scalar::value_type ValueType; const int num_scalar_operators = 24; const int num_vector_operators = 23; // no unary + operator. int out_idx = i * (num_scalar_operators + num_vector_operators * T::MaxSizeAtCompileTime); // Scalar operators. 
const ComplexType a = in[i]; const ComplexType b = in[i + 1]; out[out_idx++] = +a; out[out_idx++] = -a; out[out_idx++] = a + b; out[out_idx++] = a + numext::real(b); out[out_idx++] = numext::real(a) + b; out[out_idx++] = a - b; out[out_idx++] = a - numext::real(b); out[out_idx++] = numext::real(a) - b; out[out_idx++] = a * b; out[out_idx++] = a * numext::real(b); out[out_idx++] = numext::real(a) * b; out[out_idx++] = a / b; out[out_idx++] = a / numext::real(b); out[out_idx++] = numext::real(a) / b; #if !defined(EIGEN_COMP_MSVC) out[out_idx] = a; out[out_idx++] += b; out[out_idx] = a; out[out_idx++] -= b; out[out_idx] = a; out[out_idx++] *= b; out[out_idx] = a; out[out_idx++] /= b; #endif const ComplexType true_value = ComplexType(ValueType(1), ValueType(0)); const ComplexType false_value = ComplexType(ValueType(0), ValueType(0)); out[out_idx++] = (a == b ? true_value : false_value); out[out_idx++] = (a == numext::real(b) ? true_value : false_value); out[out_idx++] = (numext::real(a) == b ? true_value : false_value); out[out_idx++] = (a != b ? true_value : false_value); out[out_idx++] = (a != numext::real(b) ? true_value : false_value); out[out_idx++] = (numext::real(a) != b ? true_value : false_value); // Vector versions. 
T x1(in + i); T x2(in + i + 1); const int res_size = T::MaxSizeAtCompileTime * num_scalar_operators; const int size = T::MaxSizeAtCompileTime; int block_idx = 0; Map> res(out + out_idx, res_size); res.segment(block_idx, size) = -x1; block_idx += size; res.segment(block_idx, size) = x1 + x2; block_idx += size; res.segment(block_idx, size) = x1 + x2.real(); block_idx += size; res.segment(block_idx, size) = x1.real() + x2; block_idx += size; res.segment(block_idx, size) = x1 - x2; block_idx += size; res.segment(block_idx, size) = x1 - x2.real(); block_idx += size; res.segment(block_idx, size) = x1.real() - x2; block_idx += size; res.segment(block_idx, size) = x1.array() * x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() * x2.real().array(); block_idx += size; res.segment(block_idx, size) = x1.real().array() * x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() / x2.array(); block_idx += size; res.segment(block_idx, size) = x1.array() / x2.real().array(); block_idx += size; res.segment(block_idx, size) = x1.real().array() / x2.array(); block_idx += size; #if !defined(EIGEN_COMP_MSVC) res.segment(block_idx, size) = x1; res.segment(block_idx, size) += x2; block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size) -= x2; block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() *= x2.array(); block_idx += size; res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() /= x2.array(); block_idx += size; #endif const T true_vector = T::Constant(true_value); const T false_vector = T::Constant(false_value); res.segment(block_idx, size) = (x1 == x2 ? true_vector : false_vector); block_idx += size; // Mixing types in equality comparison does not work. // res.segment(block_idx, size) = (x1 == x2.real() ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1.real() == x2 ? 
true_vector : false_vector); // block_idx += size; res.segment(block_idx, size) = (x1 != x2 ? true_vector : false_vector); block_idx += size; // res.segment(block_idx, size) = (x1 != x2.real() ? true_vector : false_vector); // block_idx += size; // res.segment(block_idx, size) = (x1.real() != x2 ? true_vector : false_vector); // block_idx += size; } }; template struct replicate { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T x1(in+i); int step = x1.size() * 4; int stride = 3 * step; typedef Map > MapType; MapType(out+i*stride+0*step, x1.rows()*2, x1.cols()*2) = x1.replicate(2,2); MapType(out+i*stride+1*step, x1.rows()*3, x1.cols()) = in[i] * x1.colwise().replicate(3); MapType(out+i*stride+2*step, x1.rows(), x1.cols()*3) = in[i] * x1.rowwise().replicate(3); } }; template struct alloc_new_delete { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { int offset = 2*i*T::MaxSizeAtCompileTime; T* x = new T(in + offset); Eigen::Map u(out + offset); u = *x; delete x; offset += T::MaxSizeAtCompileTime; T* y = new T[1]; y[0] = T(in + offset); Eigen::Map v(out + offset); v = y[0]; delete[] y; } }; template struct redux { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; int N = 10; T x1(in+i); out[i*N+0] = x1.minCoeff(); out[i*N+1] = x1.maxCoeff(); out[i*N+2] = x1.sum(); out[i*N+3] = x1.prod(); out[i*N+4] = x1.matrix().squaredNorm(); out[i*N+5] = x1.matrix().norm(); out[i*N+6] = x1.colwise().sum().maxCoeff(); out[i*N+7] = x1.rowwise().maxCoeff().sum(); out[i*N+8] = x1.matrix().colwise().squaredNorm().sum(); } }; template struct prod_test { EIGEN_DEVICE_FUNC void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const { using namespace Eigen; typedef Matrix T3; T1 x1(in+i); T2 x2(in+i+1); Map res(out+i*T3::MaxSizeAtCompileTime); res += in[i] * 
x1 * x2; } }; template struct diagonal { EIGEN_DEVICE_FUNC void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const { using namespace Eigen; T1 x1(in+i); Map res(out+i*T2::MaxSizeAtCompileTime); res += x1.diagonal(); } }; template struct eigenvalues_direct { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef Matrix Vec; T M(in+i); Map res(out+i*Vec::MaxSizeAtCompileTime); T A = M*M.adjoint(); SelfAdjointEigenSolver eig; eig.computeDirect(A); res = eig.eigenvalues(); } }; template struct eigenvalues { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; typedef Matrix Vec; T M(in+i); Map res(out+i*Vec::MaxSizeAtCompileTime); T A = M*M.adjoint(); SelfAdjointEigenSolver eig; eig.compute(A); res = eig.eigenvalues(); } }; template struct matrix_inverse { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { using namespace Eigen; T M(in+i); Map res(out+i*T::MaxSizeAtCompileTime); res = M.inverse(); } }; template struct numeric_limits_test { EIGEN_DEVICE_FUNC void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const { EIGEN_UNUSED_VARIABLE(in) int out_idx = i * 5; out[out_idx++] = numext::numeric_limits::epsilon(); out[out_idx++] = (numext::numeric_limits::max)(); out[out_idx++] = (numext::numeric_limits::min)(); out[out_idx++] = numext::numeric_limits::infinity(); out[out_idx++] = numext::numeric_limits::quiet_NaN(); } }; template bool verifyIsApproxWithInfsNans(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only { if (a.rows() != b.rows()) { return false; } if (a.cols() != b.cols()) { return false; } for (Index r = 0; r < a.rows(); ++r) { for (Index c = 0; c < a.cols(); ++c) { if (a(r, c) != b(r, c) && !((numext::isnan)(a(r, c)) && (numext::isnan)(b(r, c))) && 
!test_isApprox(a(r, c), b(r, c))) { return false; } } } return true; } template void test_with_infs_nans(const Kernel& ker, int n, const Input& in, Output& out) { Output out_ref, out_gpu; #if !defined(EIGEN_GPU_COMPILE_PHASE) out_ref = out_gpu = out; #else EIGEN_UNUSED_VARIABLE(in); EIGEN_UNUSED_VARIABLE(out); #endif run_on_cpu (ker, n, in, out_ref); run_on_gpu(ker, n, in, out_gpu); #if !defined(EIGEN_GPU_COMPILE_PHASE) verifyIsApproxWithInfsNans(out_ref, out_gpu); #endif } EIGEN_DECLARE_TEST(gpu_basic) { ei_test_init_gpu(); int nthreads = 100; Eigen::VectorXf in, out; Eigen::VectorXcf cfin, cfout; #if !defined(EIGEN_GPU_COMPILE_PHASE) int data_size = nthreads * 512; in.setRandom(data_size); out.setConstant(data_size, -1); cfin.setRandom(data_size); cfout.setConstant(data_size, -1); #endif CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) ); #if !defined(EIGEN_USE_HIP) // FIXME // These subtests result in a compile failure on the HIP platform // // eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error: // base class 'internal::dense_xpr_base, -1, -1> >::type' // (aka 'ArrayBase, -1, -1> >') has protected default constructor CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) ); // HIP does not support new/delete on device. 
CALL_SUBTEST( run_and_compare_to_gpu(alloc_new_delete(), nthreads, in, out) ); #endif CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) ); CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) ); // Test std::complex. CALL_SUBTEST( run_and_compare_to_gpu(complex_operators(), nthreads, cfin, cfout) ); CALL_SUBTEST( test_with_infs_nans(complex_sqrt(), nthreads, cfin, cfout) ); // numeric_limits CALL_SUBTEST( test_with_infs_nans(numeric_limits_test(), 1, in, out) ); #if defined(__NVCC__) // FIXME // These subtests compiles only with nvcc and fail with HIPCC and clang-cuda CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues(), nthreads, in, out) ); typedef Matrix Matrix6f; CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues(), nthreads, in, out) ); #endif } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/gpu_common.h ================================================ #ifndef EIGEN_TEST_GPU_COMMON_H #define EIGEN_TEST_GPU_COMMON_H #ifdef EIGEN_USE_HIP #include #include #else #include #include #include #endif #include #define EIGEN_USE_GPU #include #if !defined(__CUDACC__) && !defined(__HIPCC__) dim3 threadIdx, blockDim, blockIdx; #endif template void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out) { for(int 
i=0; i __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out) { typename Input::Scalar* d_in; typename Output::Scalar* d_out; std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar); std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar); gpuMalloc((void**)(&d_in), in_bytes); gpuMalloc((void**)(&d_out), out_bytes); gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice); gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice); // Simple and non-optimal 1D mapping assuming n is not too large // That's only for unit testing! dim3 Blocks(128); dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) ); gpuDeviceSynchronize(); #ifdef EIGEN_USE_HIP hipLaunchKernelGGL(HIP_KERNEL_NAME(run_on_gpu_meta_kernel::type, typename std::decay::type>), dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out); #else run_on_gpu_meta_kernel<<>>(ker, n, d_in, d_out); #endif // Pre-launch errors. gpuError_t err = gpuGetLastError(); if (err != gpuSuccess) { printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); gpu_assert(false); } // Kernel execution errors. 
err = gpuDeviceSynchronize(); if (err != gpuSuccess) { printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); gpu_assert(false); } // check inputs have not been modified gpuMemcpy(const_cast(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost); gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost); gpuFree(d_in); gpuFree(d_out); } template void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out) { Input in_ref, in_gpu; Output out_ref, out_gpu; #if !defined(EIGEN_GPU_COMPILE_PHASE) in_ref = in_gpu = in; out_ref = out_gpu = out; #else EIGEN_UNUSED_VARIABLE(in); EIGEN_UNUSED_VARIABLE(out); #endif run_on_cpu (ker, n, in_ref, out_ref); run_on_gpu(ker, n, in_gpu, out_gpu); #if !defined(EIGEN_GPU_COMPILE_PHASE) VERIFY_IS_APPROX(in_ref, in_gpu); VERIFY_IS_APPROX(out_ref, out_gpu); #endif } struct compile_time_device_info { EIGEN_DEVICE_FUNC void operator()(int i, const int* /*in*/, int* info) const { if (i == 0) { EIGEN_UNUSED_VARIABLE(info) #if defined(__CUDA_ARCH__) info[0] = int(__CUDA_ARCH__ +0); #endif #if defined(EIGEN_HIP_DEVICE_COMPILE) info[1] = int(EIGEN_HIP_DEVICE_COMPILE +0); #endif } } }; void ei_test_init_gpu() { int device = 0; gpuDeviceProp_t deviceProp; gpuGetDeviceProperties(&deviceProp, device); ArrayXi dummy(1), info(10); info = -1; run_on_gpu(compile_time_device_info(),10,dummy,info); std::cout << "GPU compile-time info:\n"; #ifdef EIGEN_CUDACC std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << "\n"; #endif #ifdef EIGEN_CUDA_SDK_VER std::cout << " EIGEN_CUDA_SDK_VER: " << int(EIGEN_CUDA_SDK_VER) << "\n"; #endif #ifdef EIGEN_COMP_NVCC std::cout << " EIGEN_COMP_NVCC: " << int(EIGEN_COMP_NVCC) << "\n"; #endif #ifdef EIGEN_HIPCC std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << "\n"; #endif std::cout << " EIGEN_CUDA_ARCH: " << info[0] << "\n"; std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info[1] << "\n"; std::cout << "GPU device info:\n"; std::cout << " name: " << deviceProp.name << "\n"; std::cout << " 
capability: " << deviceProp.major << "." << deviceProp.minor << "\n"; std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n"; std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n"; std::cout << " warpSize: " << deviceProp.warpSize << "\n"; std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n"; std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n"; std::cout << " clockRate: " << deviceProp.clockRate << "\n"; std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n"; std::cout << " computeMode: " << deviceProp.computeMode << "\n"; } #endif // EIGEN_TEST_GPU_COMMON_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/gpu_example.cu ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2021 The Eigen Team. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // The following is an example GPU test. #include "main.h" // Include the main test utilities. // Define a kernel functor. // // The kernel must be a POD type and implement operator(). struct AddKernel { // Parameters must be POD or serializable Eigen types (e.g. Matrix, // Array). The return value must be a POD or serializable value type. template EIGEN_DEVICE_FUNC Type3 operator()(const Type1& A, const Type2& B, Type3& C) const { C = A + B; // Populate output parameter. Type3 D = A + B; // Populate return value. return D; } }; // Define a sub-test that uses the kernel. template void test_add(const T& type) { const Index rows = type.rows(); const Index cols = type.cols(); // Create random inputs. const T A = T::Random(rows, cols); const T B = T::Random(rows, cols); T C; // Output parameter. 
// Create kernel. AddKernel add_kernel; // Run add_kernel(A, B, C) via run(...). // This will run on the GPU if using a GPU compiler, or CPU otherwise, // facilitating generic tests that can run on either. T D = run(add_kernel, A, B, C); // Check that both output parameter and return value are correctly populated. const T expected = A + B; VERIFY_IS_CWISE_EQUAL(C, expected); VERIFY_IS_CWISE_EQUAL(D, expected); // In a GPU-only test, we can verify that the CPU and GPU produce the // same results. T C_cpu, C_gpu; T D_cpu = run_on_cpu(add_kernel, A, B, C_cpu); // Runs on CPU. T D_gpu = run_on_gpu(add_kernel, A, B, C_gpu); // Runs on GPU. VERIFY_IS_CWISE_EQUAL(C_cpu, C_gpu); VERIFY_IS_CWISE_EQUAL(D_cpu, D_gpu); }; struct MultiplyKernel { template EIGEN_DEVICE_FUNC Type3 operator()(const Type1& A, const Type2& B, Type3& C) const { C = A * B; return A * B; } }; template void test_multiply(const T1& type1, const T2& type2, const T3& type3) { const T1 A = T1::Random(type1.rows(), type1.cols()); const T2 B = T2::Random(type2.rows(), type2.cols()); T3 C; MultiplyKernel multiply_kernel; // The run(...) family of functions uses a memory buffer to transfer data back // and forth to and from the device. The size of this buffer is estimated // from the size of all input parameters. If the estimated buffer size is // not sufficient for transferring outputs from device-to-host, then an // explicit buffer size needs to be specified. // 2 outputs of size (A * B). For each matrix output, the buffer will store // the number of rows, columns, and the data. 
size_t buffer_capacity_hint = 2 * ( // 2 output parameters 2 * sizeof(typename T3::Index) // # Rows, # Cols + A.rows() * B.cols() * sizeof(typename T3::Scalar)); // Output data T3 D = run_with_hint(buffer_capacity_hint, multiply_kernel, A, B, C); const T3 expected = A * B; VERIFY_IS_CWISE_APPROX(C, expected); VERIFY_IS_CWISE_APPROX(D, expected); T3 C_cpu, C_gpu; T3 D_cpu = run_on_cpu(multiply_kernel, A, B, C_cpu); T3 D_gpu = run_on_gpu_with_hint(buffer_capacity_hint, multiply_kernel, A, B, C_gpu); VERIFY_IS_CWISE_APPROX(C_cpu, C_gpu); VERIFY_IS_CWISE_APPROX(D_cpu, D_gpu); } // Declare the test fixture. EIGEN_DECLARE_TEST(gpu_example) { // For the number of repeats, call the desired subtests. for(int i = 0; i < g_repeat; i++) { // Call subtests with different sized/typed inputs. CALL_SUBTEST( test_add(Eigen::Vector3f()) ); CALL_SUBTEST( test_add(Eigen::Matrix3d()) ); #if !defined(EIGEN_USE_HIP) // FIXME CALL_SUBTEST( test_add(Eigen::MatrixX(10, 10)) ); #endif CALL_SUBTEST( test_add(Eigen::Array44f()) ); #if !defined(EIGEN_USE_HIP) CALL_SUBTEST( test_add(Eigen::ArrayXd(20)) ); CALL_SUBTEST( test_add(Eigen::ArrayXXi(13, 17)) ); #endif CALL_SUBTEST( test_multiply(Eigen::Matrix3d(), Eigen::Matrix3d(), Eigen::Matrix3d()) ); #if !defined(EIGEN_USE_HIP) CALL_SUBTEST( test_multiply(Eigen::MatrixX(10, 10), Eigen::MatrixX(10, 10), Eigen::MatrixX()) ); CALL_SUBTEST( test_multiply(Eigen::MatrixXf(12, 1), Eigen::MatrixXf(1, 32), Eigen::MatrixXf()) ); #endif } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/gpu_test_helper.h ================================================ #ifndef GPU_TEST_HELPER_H #define GPU_TEST_HELPER_H #include #ifdef EIGEN_GPUCC #define EIGEN_USE_GPU #include "../unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h" #endif // EIGEN_GPUCC // std::tuple cannot be used on device, and there is a bug in cuda < 9.2 that // doesn't allow std::tuple to compile for host code either. 
In these cases, // use our custom implementation. #if defined(EIGEN_GPU_COMPILE_PHASE) || (defined(EIGEN_CUDACC) && EIGEN_CUDA_SDK_VER < 92000) #define EIGEN_USE_CUSTOM_TUPLE 1 #else #define EIGEN_USE_CUSTOM_TUPLE 0 #endif #if EIGEN_USE_CUSTOM_TUPLE #include "../Eigen/src/Core/arch/GPU/Tuple.h" #else #include #endif namespace Eigen { namespace internal { // Note: cannot re-use tuple_impl, since that will cause havoc for // tuple_test. namespace test_detail { // Use std::tuple on CPU, otherwise use the GPU-specific versions. #if !EIGEN_USE_CUSTOM_TUPLE using std::tuple; using std::get; using std::make_tuple; using std::tie; #else using tuple_impl::tuple; using tuple_impl::get; using tuple_impl::make_tuple; using tuple_impl::tie; #endif #undef EIGEN_USE_CUSTOM_TUPLE } // namespace test_detail template struct extract_output_indices_helper; /** * Extracts a set of indices corresponding to non-const l-value reference * output types. * * \internal * \tparam N the number of types {T1, Ts...}. * \tparam Idx the "index" to append if T1 is an output type. * \tparam OutputIndices the current set of output indices. * \tparam T1 the next type to consider, with index Idx. * \tparam Ts the remaining types. */ template struct extract_output_indices_helper, T1, Ts...> { using type = typename extract_output_indices_helper< N - 1, Idx + 1, typename std::conditional< // If is a non-const l-value reference, append index. std::is_lvalue_reference::value && !std::is_const::type>::value, index_sequence, index_sequence >::type, Ts...>::type; }; // Base case. template struct extract_output_indices_helper<0, Idx, index_sequence > { using type = index_sequence; }; // Extracts a set of indices into Types... that correspond to non-const // l-value references. template using extract_output_indices = typename extract_output_indices_helper, Types...>::type; // Helper struct for dealing with Generic functors that may return void. 
struct void_helper { struct Void {}; // Converts void -> Void, T otherwise. template using ReturnType = typename std::conditional::value, Void, T>::type; // Non-void return value. template static EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC auto call(Func&& func, Args&&... args) -> typename std::enable_if::value, decltype(func(args...))>::type { return func(std::forward(args)...); } // Void return value. template static EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC auto call(Func&& func, Args&&... args) -> typename std::enable_if::value, Void>::type { func(std::forward(args)...); return Void{}; } // Restores the original return type, Void -> void, T otherwise. template static EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC typename std::enable_if::type, Void>::value, T>::type restore(T&& val) { return val; } // Void case. template static EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC void restore(const Void&) {} }; // Runs a kernel via serialized buffer. Does this by deserializing the buffer // to construct the arguments, calling the kernel, then re-serialing the outputs. // The buffer contains // [ input_buffer_size, args ] // After the kernel call, it is then populated with // [ output_buffer_size, output_parameters, return_value ] // If the output_buffer_size exceeds the buffer's capacity, then only the // output_buffer_size is populated. template EIGEN_DEVICE_FUNC void run_serialized(index_sequence, index_sequence, Kernel kernel, uint8_t* buffer, size_t capacity) { using test_detail::get; using test_detail::make_tuple; using test_detail::tuple; // Deserialize input size and inputs. size_t input_size; uint8_t* buff_ptr = Eigen::deserialize(buffer, input_size); // Create value-type instances to populate. auto args = make_tuple(typename std::decay::type{}...); EIGEN_UNUSED_VARIABLE(args) // Avoid NVCC compile warning. // NVCC 9.1 requires us to spell out the template parameters explicitly. 
buff_ptr = Eigen::deserialize(buff_ptr, get::type...>(args)...); // Call function, with void->Void conversion so we are guaranteed a complete // output type. auto result = void_helper::call(kernel, get::type...>(args)...); // Determine required output size. size_t output_size = Eigen::serialize_size(capacity); output_size += Eigen::serialize_size(get::type...>(args)...); output_size += Eigen::serialize_size(result); // Always serialize required buffer size. buff_ptr = Eigen::serialize(buffer, output_size); // Serialize outputs if they fit in the buffer. if (output_size <= capacity) { // Collect outputs and result. buff_ptr = Eigen::serialize(buff_ptr, get::type...>(args)...); buff_ptr = Eigen::serialize(buff_ptr, result); } } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run_serialized(Kernel kernel, uint8_t* buffer, size_t capacity) { run_serialized (make_index_sequence{}, extract_output_indices{}, kernel, buffer, capacity); } #ifdef EIGEN_GPUCC // Checks for GPU errors and asserts / prints the error message. #define GPU_CHECK(expr) \ do { \ gpuError_t err = expr; \ if (err != gpuSuccess) { \ printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); \ gpu_assert(false); \ } \ } while(0) // Calls run_serialized on the GPU. template __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void run_serialized_on_gpu_meta_kernel(const Kernel kernel, uint8_t* buffer, size_t capacity) { run_serialized(kernel, buffer, capacity); } // Runs kernel(args...) on the GPU via the serialization mechanism. // // Note: this may end up calling the kernel multiple times if the initial output // buffer is not large enough to hold the outputs. template auto run_serialized_on_gpu(size_t buffer_capacity_hint, index_sequence, index_sequence, Kernel kernel, Args&&... args) -> decltype(kernel(args...)) { // Compute the required serialization buffer capacity. // Round up input size to next power of two to give a little extra room // for outputs. 
size_t input_data_size = sizeof(size_t) + Eigen::serialize_size(args...); size_t capacity; if (buffer_capacity_hint == 0) { // Estimate as the power of two larger than the total input size. capacity = sizeof(size_t); while (capacity <= input_data_size) { capacity *= 2; } } else { // Use the larger of the hint and the total input size. // Add sizeof(size_t) to the hint to account for storing the buffer capacity // itself so the user doesn't need to think about this. capacity = std::max(buffer_capacity_hint + sizeof(size_t), input_data_size); } std::vector buffer(capacity); uint8_t* host_data = nullptr; uint8_t* host_ptr = nullptr; uint8_t* device_data = nullptr; size_t output_data_size = 0; // Allocate buffers and copy input data. capacity = std::max(capacity, output_data_size); buffer.resize(capacity); host_data = buffer.data(); host_ptr = Eigen::serialize(host_data, input_data_size); host_ptr = Eigen::serialize(host_ptr, args...); // Copy inputs to host. gpuMalloc((void**)(&device_data), capacity); gpuMemcpy(device_data, buffer.data(), input_data_size, gpuMemcpyHostToDevice); GPU_CHECK(gpuDeviceSynchronize()); // Run kernel. #ifdef EIGEN_USE_HIP hipLaunchKernelGGL( HIP_KERNEL_NAME(run_serialized_on_gpu_meta_kernel), 1, 1, 0, 0, kernel, device_data, capacity); #else run_serialized_on_gpu_meta_kernel<<<1,1>>>( kernel, device_data, capacity); #endif // Check pre-launch and kernel execution errors. GPU_CHECK(gpuGetLastError()); GPU_CHECK(gpuDeviceSynchronize()); // Copy back new output to host. gpuMemcpy(host_data, device_data, capacity, gpuMemcpyDeviceToHost); gpuFree(device_data); GPU_CHECK(gpuDeviceSynchronize()); // Determine output buffer size. host_ptr = Eigen::deserialize(host_data, output_data_size); // If the output doesn't fit in the buffer, spit out warning and fail. if (output_data_size > capacity) { std::cerr << "The serialized output does not fit in the output buffer, " << output_data_size << " vs capacity " << capacity << "." 
<< std::endl << "Try specifying a minimum buffer capacity: " << std::endl << " run_with_hint(" << output_data_size << ", ...)" << std::endl; VERIFY(false); } // Deserialize outputs. auto args_tuple = test_detail::tie(args...); EIGEN_UNUSED_VARIABLE(args_tuple) // Avoid NVCC compile warning. host_ptr = Eigen::deserialize(host_ptr, test_detail::get(args_tuple)...); // Maybe deserialize return value, properly handling void. typename void_helper::ReturnType result; host_ptr = Eigen::deserialize(host_ptr, result); return void_helper::restore(result); } #endif // EIGEN_GPUCC } // namespace internal /** * Runs a kernel on the CPU, returning the results. * \param kernel kernel to run. * \param args ... input arguments. * \return kernel(args...). */ template auto run_on_cpu(Kernel kernel, Args&&... args) -> decltype(kernel(args...)){ return kernel(std::forward(args)...); } #ifdef EIGEN_GPUCC /** * Runs a kernel on the GPU, returning the results. * * The kernel must be able to be passed directly as an input to a global * function (i.e. empty or POD). Its inputs must be "Serializable" so we * can transfer them to the device, and the output must be a Serializable value * type so it can be transferred back from the device. * * \param kernel kernel to run. * \param args ... input arguments, must be "Serializable". * \return kernel(args...). */ template auto run_on_gpu(Kernel kernel, Args&&... args) -> decltype(kernel(args...)){ return internal::run_serialized_on_gpu( /*buffer_capacity_hint=*/ 0, internal::make_index_sequence{}, internal::extract_output_indices{}, kernel, std::forward(args)...); } /** * Runs a kernel on the GPU, returning the results. * * This version allows specifying a minimum buffer capacity size required for * serializing the puts to transfer results from device to host. Use this when * `run_on_gpu(...)` fails to determine an appropriate capacity by default. * * \param buffer_capacity_hint minimum required buffer size for serializing * outputs. 
* \param kernel kernel to run. * \param args ... input arguments, must be "Serializable". * \return kernel(args...). * \sa run_on_gpu */ template auto run_on_gpu_with_hint(size_t buffer_capacity_hint, Kernel kernel, Args&&... args) -> decltype(kernel(args...)){ return internal::run_serialized_on_gpu( buffer_capacity_hint, internal::make_index_sequence{}, internal::extract_output_indices{}, kernel, std::forward(args)...); } /** * Kernel for determining basic Eigen compile-time information * (i.e. the cuda/hip arch) */ struct CompileTimeDeviceInfoKernel { struct Info { int cuda; int hip; }; EIGEN_DEVICE_FUNC Info operator()() const { Info info = {-1, -1}; #if defined(__CUDA_ARCH__) info.cuda = static_cast(__CUDA_ARCH__ +0); #endif #if defined(EIGEN_HIP_DEVICE_COMPILE) info.hip = static_cast(EIGEN_HIP_DEVICE_COMPILE +0); #endif return info; } }; /** * Queries and prints the compile-time and runtime GPU info. */ void print_gpu_device_info() { int device = 0; gpuDeviceProp_t deviceProp; gpuGetDeviceProperties(&deviceProp, device); auto info = run_on_gpu(CompileTimeDeviceInfoKernel()); std::cout << "GPU compile-time info:\n"; #ifdef EIGEN_CUDACC std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << std::endl; #endif #ifdef EIGEN_CUDA_SDK_VER std::cout << " EIGEN_CUDA_SDK_VER: " << int(EIGEN_CUDA_SDK_VER) << std::endl; #endif #ifdef EIGEN_COMP_NVCC std::cout << " EIGEN_COMP_NVCC: " << int(EIGEN_COMP_NVCC) << std::endl; #endif #ifdef EIGEN_HIPCC std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << std::endl; #endif std::cout << " EIGEN_CUDA_ARCH: " << info.cuda << std::endl; std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info.hip << std::endl; std::cout << "GPU device info:\n"; std::cout << " name: " << deviceProp.name << std::endl; std::cout << " capability: " << deviceProp.major << "." 
<< deviceProp.minor << std::endl; std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << std::endl; std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << std::endl; std::cout << " warpSize: " << deviceProp.warpSize << std::endl; std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << std::endl; std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << std::endl; std::cout << " clockRate: " << deviceProp.clockRate << std::endl; std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << std::endl; std::cout << " computeMode: " << deviceProp.computeMode << std::endl; } #endif // EIGEN_GPUCC /** * Runs a kernel on the GPU (if EIGEN_GPUCC), or CPU otherwise. * * This is to better support creating generic tests. * * The kernel must be able to be passed directly as an input to a global * function (i.e. empty or POD). Its inputs must be "Serializable" so we * can transfer them to the device, and the output must be a Serializable value * type so it can be transferred back from the device. * * \param kernel kernel to run. * \param args ... input arguments, must be "Serializable". * \return kernel(args...). */ template auto run(Kernel kernel, Args&&... args) -> decltype(kernel(args...)){ #ifdef EIGEN_GPUCC return run_on_gpu(kernel, std::forward(args)...); #else return run_on_cpu(kernel, std::forward(args)...); #endif } /** * Runs a kernel on the GPU (if EIGEN_GPUCC), or CPU otherwise. * * This version allows specifying a minimum buffer capacity size required for * serializing the puts to transfer results from device to host. Use this when * `run(...)` fails to determine an appropriate capacity by default. * * \param buffer_capacity_hint minimum required buffer size for serializing * outputs. * \param kernel kernel to run. * \param args ... input arguments, must be "Serializable". * \return kernel(args...). 
* \sa run */ template auto run_with_hint(size_t buffer_capacity_hint, Kernel kernel, Args&&... args) -> decltype(kernel(args...)){ #ifdef EIGEN_GPUCC return run_on_gpu_with_hint(buffer_capacity_hint, kernel, std::forward(args)...); #else EIGEN_UNUSED_VARIABLE(buffer_capacity_hint) return run_on_cpu(kernel, std::forward(args)...); #endif } } // namespace Eigen #endif // GPU_TEST_HELPER_H ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/half_float.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include "main.h" #include #define VERIFY_HALF_BITS_EQUAL(h, bits) \ VERIFY_IS_EQUAL((numext::bit_cast(h)), (static_cast(bits))) // Make sure it's possible to forward declare Eigen::half namespace Eigen { struct half; } using Eigen::half; void test_conversion() { using Eigen::half_impl::__half_raw; // Round-trip bit-cast with uint16. VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(half(1.0f))), half(1.0f)); VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(half(0.5f))), half(0.5f)); VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(half(-0.33333f))), half(-0.33333f)); VERIFY_IS_EQUAL( numext::bit_cast(numext::bit_cast(half(0.0f))), half(0.0f)); // Conversion from float. VERIFY_HALF_BITS_EQUAL(half(1.0f), 0x3c00); VERIFY_HALF_BITS_EQUAL(half(0.5f), 0x3800); VERIFY_HALF_BITS_EQUAL(half(0.33333f), 0x3555); VERIFY_HALF_BITS_EQUAL(half(0.0f), 0x0000); VERIFY_HALF_BITS_EQUAL(half(-0.0f), 0x8000); VERIFY_HALF_BITS_EQUAL(half(65504.0f), 0x7bff); VERIFY_HALF_BITS_EQUAL(half(65536.0f), 0x7c00); // Becomes infinity. // Denormals. 
VERIFY_HALF_BITS_EQUAL(half(-5.96046e-08f), 0x8001); VERIFY_HALF_BITS_EQUAL(half(5.96046e-08f), 0x0001); VERIFY_HALF_BITS_EQUAL(half(1.19209e-07f), 0x0002); // Verify round-to-nearest-even behavior. float val1 = float(half(__half_raw(0x3c00))); float val2 = float(half(__half_raw(0x3c01))); float val3 = float(half(__half_raw(0x3c02))); VERIFY_HALF_BITS_EQUAL(half(0.5f * (val1 + val2)), 0x3c00); VERIFY_HALF_BITS_EQUAL(half(0.5f * (val2 + val3)), 0x3c02); // Conversion from int. VERIFY_HALF_BITS_EQUAL(half(-1), 0xbc00); VERIFY_HALF_BITS_EQUAL(half(0), 0x0000); VERIFY_HALF_BITS_EQUAL(half(1), 0x3c00); VERIFY_HALF_BITS_EQUAL(half(2), 0x4000); VERIFY_HALF_BITS_EQUAL(half(3), 0x4200); // Conversion from bool. VERIFY_HALF_BITS_EQUAL(half(false), 0x0000); VERIFY_HALF_BITS_EQUAL(half(true), 0x3c00); // Conversion to float. VERIFY_IS_EQUAL(float(half(__half_raw(0x0000))), 0.0f); VERIFY_IS_EQUAL(float(half(__half_raw(0x3c00))), 1.0f); // Denormals. VERIFY_IS_APPROX(float(half(__half_raw(0x8001))), -5.96046e-08f); VERIFY_IS_APPROX(float(half(__half_raw(0x0001))), 5.96046e-08f); VERIFY_IS_APPROX(float(half(__half_raw(0x0002))), 1.19209e-07f); // NaNs and infinities. VERIFY(!(numext::isinf)(float(half(65504.0f)))); // Largest finite number. VERIFY(!(numext::isnan)(float(half(0.0f)))); VERIFY((numext::isinf)(float(half(__half_raw(0xfc00))))); VERIFY((numext::isnan)(float(half(__half_raw(0xfc01))))); VERIFY((numext::isinf)(float(half(__half_raw(0x7c00))))); VERIFY((numext::isnan)(float(half(__half_raw(0x7c01))))); #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 VERIFY((numext::isnan)(float(half(0.0 / 0.0)))); VERIFY((numext::isinf)(float(half(1.0 / 0.0)))); VERIFY((numext::isinf)(float(half(-1.0 / 0.0)))); #endif // Exactly same checks as above, just directly on the half representation. 
VERIFY(!(numext::isinf)(half(__half_raw(0x7bff)))); VERIFY(!(numext::isnan)(half(__half_raw(0x0000)))); VERIFY((numext::isinf)(half(__half_raw(0xfc00)))); VERIFY((numext::isnan)(half(__half_raw(0xfc01)))); VERIFY((numext::isinf)(half(__half_raw(0x7c00)))); VERIFY((numext::isnan)(half(__half_raw(0x7c01)))); #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 VERIFY((numext::isnan)(half(0.0 / 0.0))); VERIFY((numext::isinf)(half(1.0 / 0.0))); VERIFY((numext::isinf)(half(-1.0 / 0.0))); #endif // Conversion to bool VERIFY(!static_cast(half(0.0))); VERIFY(!static_cast(half(-0.0))); VERIFY(static_cast(half(__half_raw(0x7bff)))); VERIFY(static_cast(half(-0.33333))); VERIFY(static_cast(half(1.0))); VERIFY(static_cast(half(-1.0))); VERIFY(static_cast(half(-5.96046e-08f))); } void test_numtraits() { std::cout << "epsilon = " << NumTraits::epsilon() << " (0x" << std::hex << numext::bit_cast(NumTraits::epsilon()) << ")" << std::endl; std::cout << "highest = " << NumTraits::highest() << " (0x" << std::hex << numext::bit_cast(NumTraits::highest()) << ")" << std::endl; std::cout << "lowest = " << NumTraits::lowest() << " (0x" << std::hex << numext::bit_cast(NumTraits::lowest()) << ")" << std::endl; std::cout << "min = " << (std::numeric_limits::min)() << " (0x" << std::hex << numext::bit_cast(half((std::numeric_limits::min)())) << ")" << std::endl; std::cout << "denorm min = " << (std::numeric_limits::denorm_min)() << " (0x" << std::hex << numext::bit_cast(half((std::numeric_limits::denorm_min)())) << ")" << std::endl; std::cout << "infinity = " << NumTraits::infinity() << " (0x" << std::hex << numext::bit_cast(NumTraits::infinity()) << ")" << std::endl; std::cout << "quiet nan = " << NumTraits::quiet_NaN() << " (0x" << std::hex << numext::bit_cast(NumTraits::quiet_NaN()) << ")" << std::endl; std::cout << "signaling nan = " << std::numeric_limits::signaling_NaN() << " (0x" << std::hex << numext::bit_cast(std::numeric_limits::signaling_NaN()) << ")" << std::endl; 
VERIFY(NumTraits::IsSigned); VERIFY_IS_EQUAL( numext::bit_cast(std::numeric_limits::infinity()), numext::bit_cast(half(std::numeric_limits::infinity())) ); // There is no guarantee that casting a 32-bit NaN to 16-bit has a precise // bit pattern. We test that it is in fact a NaN, then test the signaling // bit (msb of significand is 1 for quiet, 0 for signaling). const numext::uint16_t HALF_QUIET_BIT = 0x0200; VERIFY( (numext::isnan)(std::numeric_limits::quiet_NaN()) && (numext::isnan)(half(std::numeric_limits::quiet_NaN())) && ((numext::bit_cast(std::numeric_limits::quiet_NaN()) & HALF_QUIET_BIT) > 0) && ((numext::bit_cast(half(std::numeric_limits::quiet_NaN())) & HALF_QUIET_BIT) > 0) ); // After a cast to half, a signaling NaN may become non-signaling // (e.g. in the case of casting float to native __fp16). Thus, we check that // both are NaN, and that only the `numeric_limits` version is signaling. VERIFY( (numext::isnan)(std::numeric_limits::signaling_NaN()) && (numext::isnan)(half(std::numeric_limits::signaling_NaN())) && ((numext::bit_cast(std::numeric_limits::signaling_NaN()) & HALF_QUIET_BIT) == 0) ); VERIFY( (std::numeric_limits::min)() > half(0.f) ); VERIFY( (std::numeric_limits::denorm_min)() > half(0.f) ); VERIFY( (std::numeric_limits::min)()/half(2) > half(0.f) ); VERIFY_IS_EQUAL( (std::numeric_limits::denorm_min)()/half(2), half(0.f) ); } void test_arithmetic() { VERIFY_IS_EQUAL(float(half(2) + half(2)), 4); VERIFY_IS_EQUAL(float(half(2) + half(-2)), 0); VERIFY_IS_APPROX(float(half(0.33333f) + half(0.66667f)), 1.0f); VERIFY_IS_EQUAL(float(half(2.0f) * half(-5.5f)), -11.0f); VERIFY_IS_APPROX(float(half(1.0f) / half(3.0f)), 0.33333f); VERIFY_IS_EQUAL(float(-half(4096.0f)), -4096.0f); VERIFY_IS_EQUAL(float(-half(-4096.0f)), 4096.0f); half x(3); half y = ++x; VERIFY_IS_EQUAL(x, half(4)); VERIFY_IS_EQUAL(y, half(4)); y = --x; VERIFY_IS_EQUAL(x, half(3)); VERIFY_IS_EQUAL(y, half(3)); y = x++; VERIFY_IS_EQUAL(x, half(4)); VERIFY_IS_EQUAL(y, half(3)); y = 
x--; VERIFY_IS_EQUAL(x, half(3)); VERIFY_IS_EQUAL(y, half(4)); } void test_comparison() { VERIFY(half(1.0f) > half(0.5f)); VERIFY(half(0.5f) < half(1.0f)); VERIFY(!(half(1.0f) < half(0.5f))); VERIFY(!(half(0.5f) > half(1.0f))); VERIFY(!(half(4.0f) > half(4.0f))); VERIFY(!(half(4.0f) < half(4.0f))); VERIFY(!(half(0.0f) < half(-0.0f))); VERIFY(!(half(-0.0f) < half(0.0f))); VERIFY(!(half(0.0f) > half(-0.0f))); VERIFY(!(half(-0.0f) > half(0.0f))); VERIFY(half(0.2f) > half(-1.0f)); VERIFY(half(-1.0f) < half(0.2f)); VERIFY(half(-16.0f) < half(-15.0f)); VERIFY(half(1.0f) == half(1.0f)); VERIFY(half(1.0f) != half(2.0f)); // Comparisons with NaNs and infinities. #if !EIGEN_COMP_MSVC // Visual Studio errors out on divisions by 0 VERIFY(!(half(0.0 / 0.0) == half(0.0 / 0.0))); VERIFY(half(0.0 / 0.0) != half(0.0 / 0.0)); VERIFY(!(half(1.0) == half(0.0 / 0.0))); VERIFY(!(half(1.0) < half(0.0 / 0.0))); VERIFY(!(half(1.0) > half(0.0 / 0.0))); VERIFY(half(1.0) != half(0.0 / 0.0)); VERIFY(half(1.0) < half(1.0 / 0.0)); VERIFY(half(1.0) > half(-1.0 / 0.0)); #endif } void test_basic_functions() { VERIFY_IS_EQUAL(float(numext::abs(half(3.5f))), 3.5f); VERIFY_IS_EQUAL(float(abs(half(3.5f))), 3.5f); VERIFY_IS_EQUAL(float(numext::abs(half(-3.5f))), 3.5f); VERIFY_IS_EQUAL(float(abs(half(-3.5f))), 3.5f); VERIFY_IS_EQUAL(float(numext::floor(half(3.5f))), 3.0f); VERIFY_IS_EQUAL(float(floor(half(3.5f))), 3.0f); VERIFY_IS_EQUAL(float(numext::floor(half(-3.5f))), -4.0f); VERIFY_IS_EQUAL(float(floor(half(-3.5f))), -4.0f); VERIFY_IS_EQUAL(float(numext::ceil(half(3.5f))), 4.0f); VERIFY_IS_EQUAL(float(ceil(half(3.5f))), 4.0f); VERIFY_IS_EQUAL(float(numext::ceil(half(-3.5f))), -3.0f); VERIFY_IS_EQUAL(float(ceil(half(-3.5f))), -3.0f); VERIFY_IS_APPROX(float(numext::sqrt(half(0.0f))), 0.0f); VERIFY_IS_APPROX(float(sqrt(half(0.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::sqrt(half(4.0f))), 2.0f); VERIFY_IS_APPROX(float(sqrt(half(4.0f))), 2.0f); VERIFY_IS_APPROX(float(numext::pow(half(0.0f), half(1.0f))), 
0.0f); VERIFY_IS_APPROX(float(pow(half(0.0f), half(1.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::pow(half(2.0f), half(2.0f))), 4.0f); VERIFY_IS_APPROX(float(pow(half(2.0f), half(2.0f))), 4.0f); VERIFY_IS_EQUAL(float(numext::exp(half(0.0f))), 1.0f); VERIFY_IS_EQUAL(float(exp(half(0.0f))), 1.0f); VERIFY_IS_APPROX(float(numext::exp(half(EIGEN_PI))), 20.f + float(EIGEN_PI)); VERIFY_IS_APPROX(float(exp(half(EIGEN_PI))), 20.f + float(EIGEN_PI)); VERIFY_IS_EQUAL(float(numext::expm1(half(0.0f))), 0.0f); VERIFY_IS_EQUAL(float(expm1(half(0.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::expm1(half(2.0f))), 6.3890561f); VERIFY_IS_APPROX(float(expm1(half(2.0f))), 6.3890561f); VERIFY_IS_EQUAL(float(numext::log(half(1.0f))), 0.0f); VERIFY_IS_EQUAL(float(log(half(1.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::log(half(10.0f))), 2.30273f); VERIFY_IS_APPROX(float(log(half(10.0f))), 2.30273f); VERIFY_IS_EQUAL(float(numext::log1p(half(0.0f))), 0.0f); VERIFY_IS_EQUAL(float(log1p(half(0.0f))), 0.0f); VERIFY_IS_APPROX(float(numext::log1p(half(10.0f))), 2.3978953f); VERIFY_IS_APPROX(float(log1p(half(10.0f))), 2.3978953f); VERIFY_IS_APPROX(numext::fmod(half(5.3f), half(2.0f)), half(1.3f)); VERIFY_IS_APPROX(fmod(half(5.3f), half(2.0f)), half(1.3f)); VERIFY_IS_APPROX(numext::fmod(half(-18.5f), half(-4.2f)), half(-1.7f)); VERIFY_IS_APPROX(fmod(half(-18.5f), half(-4.2f)), half(-1.7f)); } void test_trigonometric_functions() { VERIFY_IS_APPROX(numext::cos(half(0.0f)), half(cosf(0.0f))); VERIFY_IS_APPROX(cos(half(0.0f)), half(cosf(0.0f))); VERIFY_IS_APPROX(numext::cos(half(EIGEN_PI)), half(cosf(EIGEN_PI))); // VERIFY_IS_APPROX(numext::cos(half(EIGEN_PI/2)), half(cosf(EIGEN_PI/2))); // VERIFY_IS_APPROX(numext::cos(half(3*EIGEN_PI/2)), half(cosf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::cos(half(3.5f)), half(cosf(3.5f))); VERIFY_IS_APPROX(numext::sin(half(0.0f)), half(sinf(0.0f))); VERIFY_IS_APPROX(sin(half(0.0f)), half(sinf(0.0f))); // VERIFY_IS_APPROX(numext::sin(half(EIGEN_PI)), 
half(sinf(EIGEN_PI))); VERIFY_IS_APPROX(numext::sin(half(EIGEN_PI/2)), half(sinf(EIGEN_PI/2))); VERIFY_IS_APPROX(numext::sin(half(3*EIGEN_PI/2)), half(sinf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::sin(half(3.5f)), half(sinf(3.5f))); VERIFY_IS_APPROX(numext::tan(half(0.0f)), half(tanf(0.0f))); VERIFY_IS_APPROX(tan(half(0.0f)), half(tanf(0.0f))); // VERIFY_IS_APPROX(numext::tan(half(EIGEN_PI)), half(tanf(EIGEN_PI))); // VERIFY_IS_APPROX(numext::tan(half(EIGEN_PI/2)), half(tanf(EIGEN_PI/2))); //VERIFY_IS_APPROX(numext::tan(half(3*EIGEN_PI/2)), half(tanf(3*EIGEN_PI/2))); VERIFY_IS_APPROX(numext::tan(half(3.5f)), half(tanf(3.5f))); } void test_array() { typedef Array ArrayXh; Index size = internal::random(1,10); Index i = internal::random(0,size-1); ArrayXh a1 = ArrayXh::Random(size), a2 = ArrayXh::Random(size); VERIFY_IS_APPROX( a1+a1, half(2)*a1 ); VERIFY( (a1.abs() >= half(0)).all() ); VERIFY_IS_APPROX( (a1*a1).sqrt(), a1.abs() ); VERIFY( ((a1.min)(a2) <= (a1.max)(a2)).all() ); a1(i) = half(-10.); VERIFY_IS_EQUAL( a1.minCoeff(), half(-10.) ); a1(i) = half(10.); VERIFY_IS_EQUAL( a1.maxCoeff(), half(10.) 
); std::stringstream ss; ss << a1; } void test_product() { typedef Matrix MatrixXh; Index rows = internal::random(1,EIGEN_TEST_MAX_SIZE); Index cols = internal::random(1,EIGEN_TEST_MAX_SIZE); Index depth = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixXh Ah = MatrixXh::Random(rows,depth); MatrixXh Bh = MatrixXh::Random(depth,cols); MatrixXh Ch = MatrixXh::Random(rows,cols); MatrixXf Af = Ah.cast(); MatrixXf Bf = Bh.cast(); MatrixXf Cf = Ch.cast(); VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast()); } EIGEN_DECLARE_TEST(half_float) { CALL_SUBTEST(test_numtraits()); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST(test_conversion()); CALL_SUBTEST(test_arithmetic()); CALL_SUBTEST(test_comparison()); CALL_SUBTEST(test_basic_functions()); CALL_SUBTEST(test_trigonometric_functions()); CALL_SUBTEST(test_array()); CALL_SUBTEST(test_product()); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/hessenberg.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2010 Jitse Niesen // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include template void hessenberg(int size = Size) { typedef Matrix MatrixType; // Test basic functionality: A = U H U* and H is Hessenberg for(int counter = 0; counter < g_repeat; ++counter) { MatrixType m = MatrixType::Random(size,size); HessenbergDecomposition hess(m); MatrixType Q = hess.matrixQ(); MatrixType H = hess.matrixH(); VERIFY_IS_APPROX(m, Q * H * Q.adjoint()); for(int row = 2; row < size; ++row) { for(int col = 0; col < row-1; ++col) { VERIFY(H(row,col) == (typename MatrixType::Scalar)0); } } } // Test whether compute() and constructor returns same result MatrixType A = MatrixType::Random(size, size); HessenbergDecomposition cs1; cs1.compute(A); HessenbergDecomposition cs2(A); VERIFY_IS_EQUAL(cs1.matrixH().eval(), cs2.matrixH().eval()); MatrixType cs1Q = cs1.matrixQ(); MatrixType cs2Q = cs2.matrixQ(); VERIFY_IS_EQUAL(cs1Q, cs2Q); // Test assertions for when used uninitialized HessenbergDecomposition hessUninitialized; VERIFY_RAISES_ASSERT( hessUninitialized.matrixH() ); VERIFY_RAISES_ASSERT( hessUninitialized.matrixQ() ); VERIFY_RAISES_ASSERT( hessUninitialized.householderCoefficients() ); VERIFY_RAISES_ASSERT( hessUninitialized.packedMatrix() ); // TODO: Add tests for packedMatrix() and householderCoefficients() } EIGEN_DECLARE_TEST(hessenberg) { CALL_SUBTEST_1(( hessenberg,1>() )); CALL_SUBTEST_2(( hessenberg,2>() )); CALL_SUBTEST_3(( hessenberg,4>() )); CALL_SUBTEST_4(( hessenberg(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); CALL_SUBTEST_5(( hessenberg,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); // Test problem size constructors CALL_SUBTEST_6(HessenbergDecomposition(10)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/householder.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include template void householder(const MatrixType& m) { static bool even = true; even = !even; /* this test covers the following files: Householder.h */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; typedef Matrix::ret, 1> EssentialVectorType; typedef Matrix SquareMatrixType; typedef Matrix HBlockMatrixType; typedef Matrix HCoeffsVectorType; typedef Matrix TMatrixType; Matrix _tmp((std::max)(rows,cols)); Scalar* tmp = &_tmp.coeffRef(0,0); Scalar beta; RealScalar alpha; EssentialVectorType essential; VectorType v1 = VectorType::Random(rows), v2; v2 = v1; v1.makeHouseholder(essential, beta, alpha); v1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(v1.norm(), v2.norm()); if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(v1.tail(rows-1).norm(), v1.norm()); v1 = VectorType::Random(rows); v2 = v1; v1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(v1.norm(), v2.norm()); // reconstruct householder matrix: SquareMatrixType id, H1, H2; id.setIdentity(rows, rows); H1 = H2 = id; VectorType vv(rows); vv << Scalar(1), essential; H1.applyHouseholderOnTheLeft(essential, beta, tmp); H2.applyHouseholderOnTheRight(essential, beta, tmp); VERIFY_IS_APPROX(H1, H2); VERIFY_IS_APPROX(H1, id - beta * vv*vv.adjoint()); MatrixType m1(rows, cols), m2(rows, cols); v1 = VectorType::Random(rows); if(even) v1.tail(rows-1).setZero(); m1.colwise() = v1; m2 = m1; m1.col(0).makeHouseholder(essential, beta, alpha); m1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(m1.norm(), m2.norm()); if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m1.block(1,0,rows-1,cols).norm(), m1.norm()); 
VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m1(0,0)), numext::real(m1(0,0))); VERIFY_IS_APPROX(numext::real(m1(0,0)), alpha); v1 = VectorType::Random(rows); if(even) v1.tail(rows-1).setZero(); SquareMatrixType m3(rows,rows), m4(rows,rows); m3.rowwise() = v1.transpose(); m4 = m3; m3.row(0).makeHouseholder(essential, beta, alpha); m3.applyHouseholderOnTheRight(essential.conjugate(),beta,tmp); VERIFY_IS_APPROX(m3.norm(), m4.norm()); if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m3.block(0,1,rows,rows-1).norm(), m3.norm()); VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m3(0,0)), numext::real(m3(0,0))); VERIFY_IS_APPROX(numext::real(m3(0,0)), alpha); // test householder sequence on the left with a shift Index shift = internal::random(0, std::max(rows-2,0)); Index brows = rows - shift; m1.setRandom(rows, cols); HBlockMatrixType hbm = m1.block(shift,0,brows,cols); HouseholderQR qr(hbm); m2 = m1; m2.block(shift,0,brows,cols) = qr.matrixQR(); HCoeffsVectorType hc = qr.hCoeffs().conjugate(); HouseholderSequence hseq(m2, hc); hseq.setLength(hc.size()).setShift(shift); VERIFY(hseq.length() == hc.size()); VERIFY(hseq.shift() == shift); MatrixType m5 = m2; m5.block(shift,0,brows,cols).template triangularView().setZero(); VERIFY_IS_APPROX(hseq * m5, m1); // test applying hseq directly m3 = hseq; VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating hseq to a dense matrix, then applying SquareMatrixType hseq_mat = hseq; SquareMatrixType hseq_mat_conj = hseq.conjugate(); SquareMatrixType hseq_mat_adj = hseq.adjoint(); SquareMatrixType hseq_mat_trans = hseq.transpose(); SquareMatrixType m6 = SquareMatrixType::Random(rows, rows); VERIFY_IS_APPROX(hseq_mat.adjoint(), hseq_mat_adj); VERIFY_IS_APPROX(hseq_mat.conjugate(), hseq_mat_conj); VERIFY_IS_APPROX(hseq_mat.transpose(), hseq_mat_trans); VERIFY_IS_APPROX(hseq * m6, hseq_mat * m6); VERIFY_IS_APPROX(hseq.adjoint() * m6, hseq_mat_adj * m6); VERIFY_IS_APPROX(hseq.conjugate() * m6, hseq_mat_conj * m6); VERIFY_IS_APPROX(hseq.transpose() * m6, 
hseq_mat_trans * m6); VERIFY_IS_APPROX(m6 * hseq, m6 * hseq_mat); VERIFY_IS_APPROX(m6 * hseq.adjoint(), m6 * hseq_mat_adj); VERIFY_IS_APPROX(m6 * hseq.conjugate(), m6 * hseq_mat_conj); VERIFY_IS_APPROX(m6 * hseq.transpose(), m6 * hseq_mat_trans); // test householder sequence on the right with a shift TMatrixType tm2 = m2.transpose(); HouseholderSequence rhseq(tm2, hc); rhseq.setLength(hc.size()).setShift(shift); VERIFY_IS_APPROX(rhseq * m5, m1); // test applying rhseq directly m3 = rhseq; VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating rhseq to a dense matrix, then applying } EIGEN_DECLARE_TEST(householder) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( householder(Matrix()) ); CALL_SUBTEST_2( householder(Matrix()) ); CALL_SUBTEST_3( householder(Matrix()) ); CALL_SUBTEST_4( householder(Matrix()) ); CALL_SUBTEST_5( householder(MatrixXd(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( householder(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_7( householder(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( householder(Matrix()) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/incomplete_cholesky.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015-2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
// #define EIGEN_DONT_VECTORIZE // #define EIGEN_MAX_ALIGN_BYTES 0 #include "sparse_solver.h" #include #include template void test_incomplete_cholesky_T() { typedef SparseMatrix SparseMatrixType; ConjugateGradient > > cg_illt_lower_amd; ConjugateGradient > > cg_illt_lower_nat; ConjugateGradient > > cg_illt_upper_amd; ConjugateGradient > > cg_illt_upper_nat; ConjugateGradient > > cg_illt_uplo_amd; CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_amd) ); CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_nat) ); CALL_SUBTEST( check_sparse_spd_solving(cg_illt_upper_amd) ); CALL_SUBTEST( check_sparse_spd_solving(cg_illt_upper_nat) ); CALL_SUBTEST( check_sparse_spd_solving(cg_illt_uplo_amd) ); } template void bug1150() { // regression for bug 1150 for(int N = 1; N<20; ++N) { Eigen::MatrixXd b( N, N ); b.setOnes(); Eigen::SparseMatrix m( N, N ); m.reserve(Eigen::VectorXi::Constant(N,4)); for( int i = 0; i < N; ++i ) { m.insert( i, i ) = 1; m.coeffRef( i, i / 2 ) = 2; m.coeffRef( i, i / 3 ) = 2; m.coeffRef( i, i / 4 ) = 2; } Eigen::SparseMatrix A; A = m * m.transpose(); Eigen::ConjugateGradient, Eigen::Lower | Eigen::Upper, Eigen::IncompleteCholesky > solver( A ); VERIFY(solver.preconditioner().info() == Eigen::Success); VERIFY(solver.info() == Eigen::Success); } } EIGEN_DECLARE_TEST(incomplete_cholesky) { CALL_SUBTEST_1(( test_incomplete_cholesky_T() )); CALL_SUBTEST_2(( test_incomplete_cholesky_T, int>() )); CALL_SUBTEST_3(( test_incomplete_cholesky_T() )); CALL_SUBTEST_1(( bug1150<0>() )); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/indexed_view.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2017 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifdef EIGEN_TEST_PART_2 // Make sure we also check c++11 max implementation #define EIGEN_MAX_CPP_VER 11 #endif #ifdef EIGEN_TEST_PART_3 // Make sure we also check c++98 max implementation #define EIGEN_MAX_CPP_VER 03 // We need to disable this warning when compiling with c++11 while limiting Eigen to c++98 // Ideally we would rather configure the compiler to build in c++98 mode but this needs // to be done at the CMakeLists.txt level. #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) #pragma GCC diagnostic ignored "-Wdeprecated" #endif #if defined(__GNUC__) && (__GNUC__ >=9) #pragma GCC diagnostic ignored "-Wdeprecated-copy" #endif #if defined(__clang__) && (__clang_major__ >= 10) #pragma clang diagnostic ignored "-Wdeprecated-copy" #endif #endif #include #include #include "main.h" using Eigen::placeholders::all; using Eigen::placeholders::last; using Eigen::placeholders::lastp1; #if EIGEN_HAS_CXX11 using Eigen::placeholders::lastN; #include #endif typedef std::pair IndexPair; int encode(Index i, Index j) { return int(i*100 + j); } IndexPair decode(Index ij) { return IndexPair(ij / 100, ij % 100); } template bool match(const T& xpr, std::string ref, std::string str_xpr = "") { EIGEN_UNUSED_VARIABLE(str_xpr); std::stringstream str; str << xpr; if(!(str.str() == ref)) std::cout << str_xpr << "\n" << xpr << "\n\n"; return str.str() == ref; } #define MATCH(X,R) match(X, R, #X) template typename internal::enable_if::value,bool>::type is_same_eq(const T1& a, const T2& b) { return (a == b).all(); } template bool is_same_seq(const T1& a, const T2& b) { bool ok = a.first()==b.first() && a.size() == b.size() && Index(a.incrObject())==Index(b.incrObject());; if(!ok) { std::cerr << "seqN(" << a.first() << ", " << a.size() << ", " << Index(a.incrObject()) << ") != "; std::cerr << "seqN(" << b.first() << ", " << b.size() << ", " << 
Index(b.incrObject()) << ")\n"; } return ok; } template typename internal::enable_if::value,bool>::type is_same_seq_type(const T1& a, const T2& b) { return is_same_seq(a,b); } #define VERIFY_EQ_INT(A,B) VERIFY_IS_APPROX(int(A),int(B)) // C++03 does not allow local or unnamed enums as index enum DummyEnum { XX=0, YY=1 }; void check_indexed_view() { Index n = 10; ArrayXd a = ArrayXd::LinSpaced(n,0,n-1); Array b = a.transpose(); #if EIGEN_COMP_CXXVER>=14 ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ref(encode)); #else ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(&encode)); #endif for(Index i=0; i vali(4); Map(&vali[0],4) = eii; std::vector veci(4); Map(veci.data(),4) = eii; VERIFY( MATCH( A(3, seq(9,3,-1)), "309 308 307 306 305 304 303") ); VERIFY( MATCH( A(seqN(2,5), seq(9,3,-1)), "209 208 207 206 205 204 203\n" "309 308 307 306 305 304 303\n" "409 408 407 406 405 404 403\n" "509 508 507 506 505 504 503\n" "609 608 607 606 605 604 603") ); VERIFY( MATCH( A(seqN(2,5), 5), "205\n" "305\n" "405\n" "505\n" "605") ); VERIFY( MATCH( A(seqN(last,5,-1), seq(2,last)), "902 903 904 905 906 907 908 909\n" "802 803 804 805 806 807 808 809\n" "702 703 704 705 706 707 708 709\n" "602 603 604 605 606 607 608 609\n" "502 503 504 505 506 507 508 509") ); VERIFY( MATCH( A(eii, veci), "303 301 306 305\n" "103 101 106 105\n" "603 601 606 605\n" "503 501 506 505") ); VERIFY( MATCH( A(eii, all), "300 301 302 303 304 305 306 307 308 309\n" "100 101 102 103 104 105 106 107 108 109\n" "600 601 602 603 604 605 606 607 608 609\n" "500 501 502 503 504 505 506 507 508 509") ); // take row number 3, and repeat it 5 times VERIFY( MATCH( A(seqN(3,5,0), all), "300 301 302 303 304 305 306 307 308 309\n" "300 301 302 303 304 305 306 307 308 309\n" "300 301 302 303 304 305 306 307 308 309\n" "300 301 302 303 304 305 306 307 308 309\n" "300 301 302 303 304 305 306 307 308 309") ); VERIFY( MATCH( a(seqN(3,3),0), "3\n4\n5" ) ); VERIFY( MATCH( a(seq(3,5)), "3\n4\n5" ) ); VERIFY( MATCH( 
a(seqN(3,3,1)), "3\n4\n5" ) ); VERIFY( MATCH( a(seqN(5,3,-1)), "5\n4\n3" ) ); VERIFY( MATCH( b(0,seqN(3,3)), "3 4 5" ) ); VERIFY( MATCH( b(seq(3,5)), "3 4 5" ) ); VERIFY( MATCH( b(seqN(3,3,1)), "3 4 5" ) ); VERIFY( MATCH( b(seqN(5,3,-1)), "5 4 3" ) ); VERIFY( MATCH( b(all), "0 1 2 3 4 5 6 7 8 9" ) ); VERIFY( MATCH( b(eii), "3 1 6 5" ) ); Array44i B; B.setRandom(); VERIFY( (A(seqN(2,5), 5)).ColsAtCompileTime == 1); VERIFY( (A(seqN(2,5), 5)).RowsAtCompileTime == Dynamic); VERIFY_EQ_INT( (A(seqN(2,5), 5)).InnerStrideAtCompileTime , A.InnerStrideAtCompileTime); VERIFY_EQ_INT( (A(seqN(2,5), 5)).OuterStrideAtCompileTime , A.col(5).OuterStrideAtCompileTime); VERIFY_EQ_INT( (A(5,seqN(2,5))).InnerStrideAtCompileTime , A.row(5).InnerStrideAtCompileTime); VERIFY_EQ_INT( (A(5,seqN(2,5))).OuterStrideAtCompileTime , A.row(5).OuterStrideAtCompileTime); VERIFY_EQ_INT( (B(1,seqN(1,2))).InnerStrideAtCompileTime , B.row(1).InnerStrideAtCompileTime); VERIFY_EQ_INT( (B(1,seqN(1,2))).OuterStrideAtCompileTime , B.row(1).OuterStrideAtCompileTime); VERIFY_EQ_INT( (A(seqN(2,5), seq(1,3))).InnerStrideAtCompileTime , A.InnerStrideAtCompileTime); VERIFY_EQ_INT( (A(seqN(2,5), seq(1,3))).OuterStrideAtCompileTime , A.OuterStrideAtCompileTime); VERIFY_EQ_INT( (B(seqN(1,2), seq(1,3))).InnerStrideAtCompileTime , B.InnerStrideAtCompileTime); VERIFY_EQ_INT( (B(seqN(1,2), seq(1,3))).OuterStrideAtCompileTime , B.OuterStrideAtCompileTime); VERIFY_EQ_INT( (A(seqN(2,5,2), seq(1,3,2))).InnerStrideAtCompileTime , Dynamic); VERIFY_EQ_INT( (A(seqN(2,5,2), seq(1,3,2))).OuterStrideAtCompileTime , Dynamic); VERIFY_EQ_INT( (A(seqN(2,5,fix<2>), seq(1,3,fix<3>))).InnerStrideAtCompileTime , 2); VERIFY_EQ_INT( (A(seqN(2,5,fix<2>), seq(1,3,fix<3>))).OuterStrideAtCompileTime , Dynamic); VERIFY_EQ_INT( (B(seqN(1,2,fix<2>), seq(1,3,fix<3>))).InnerStrideAtCompileTime , 2); VERIFY_EQ_INT( (B(seqN(1,2,fix<2>), seq(1,3,fix<3>))).OuterStrideAtCompileTime , 3*4); VERIFY_EQ_INT( (A(seqN(2,fix<5>), 
seqN(1,fix<3>))).RowsAtCompileTime, 5); VERIFY_EQ_INT( (A(seqN(2,fix<5>), seqN(1,fix<3>))).ColsAtCompileTime, 3); VERIFY_EQ_INT( (A(seqN(2,fix<5>(5)), seqN(1,fix<3>(3)))).RowsAtCompileTime, 5); VERIFY_EQ_INT( (A(seqN(2,fix<5>(5)), seqN(1,fix<3>(3)))).ColsAtCompileTime, 3); VERIFY_EQ_INT( (A(seqN(2,fix(5)), seqN(1,fix(3)))).RowsAtCompileTime, Dynamic); VERIFY_EQ_INT( (A(seqN(2,fix(5)), seqN(1,fix(3)))).ColsAtCompileTime, Dynamic); VERIFY_EQ_INT( (A(seqN(2,fix(5)), seqN(1,fix(3)))).rows(), 5); VERIFY_EQ_INT( (A(seqN(2,fix(5)), seqN(1,fix(3)))).cols(), 3); VERIFY( is_same_seq_type( seqN(2,5,fix<-1>), seqN(2,5,fix<-1>(-1)) ) ); VERIFY( is_same_seq_type( seqN(2,5), seqN(2,5,fix<1>(1)) ) ); VERIFY( is_same_seq_type( seqN(2,5,3), seqN(2,5,fix(3)) ) ); VERIFY( is_same_seq_type( seq(2,7,fix<3>), seqN(2,2,fix<3>) ) ); VERIFY( is_same_seq_type( seqN(2,fix(5),3), seqN(2,5,fix(3)) ) ); VERIFY( is_same_seq_type( seqN(2,fix<5>(5),fix<-2>), seqN(2,fix<5>,fix<-2>()) ) ); VERIFY( is_same_seq_type( seq(2,fix<5>), seqN(2,4) ) ); #if EIGEN_HAS_CXX11 VERIFY( is_same_seq_type( seq(fix<2>,fix<5>), seqN(fix<2>,fix<4>) ) ); VERIFY( is_same_seq( seqN(2,std::integral_constant(),std::integral_constant()), seqN(2,fix<5>,fix<-2>()) ) ); VERIFY( is_same_seq( seq(std::integral_constant(),std::integral_constant(),std::integral_constant()), seq(fix<1>,fix<5>,fix<2>()) ) ); VERIFY( is_same_seq_type( seqN(2,std::integral_constant(),std::integral_constant()), seqN(2,fix<5>,fix<-2>()) ) ); VERIFY( is_same_seq_type( seq(std::integral_constant(),std::integral_constant(),std::integral_constant()), seq(fix<1>,fix<5>,fix<2>()) ) ); VERIFY( is_same_seq_type( seqN(2,std::integral_constant()), seqN(2,fix<5>) ) ); VERIFY( is_same_seq_type( seq(std::integral_constant(),std::integral_constant()), seq(fix<1>,fix<5>) ) ); #else // sorry, no compile-time size recovery in c++98/03 VERIFY( is_same_seq( seq(fix<2>,fix<5>), seqN(fix<2>,fix<4>) ) ); #endif VERIFY( (A(seqN(2,fix<5>), 5)).RowsAtCompileTime == 5); VERIFY( 
(A(4, all)).ColsAtCompileTime == Dynamic); VERIFY( (A(4, all)).RowsAtCompileTime == 1); VERIFY( (B(1, all)).ColsAtCompileTime == 4); VERIFY( (B(1, all)).RowsAtCompileTime == 1); VERIFY( (B(all,1)).ColsAtCompileTime == 1); VERIFY( (B(all,1)).RowsAtCompileTime == 4); VERIFY(int( (A(all, eii)).ColsAtCompileTime) == int(eii.SizeAtCompileTime)); VERIFY_EQ_INT( (A(eii, eii)).Flags&DirectAccessBit, (unsigned int)(0)); VERIFY_EQ_INT( (A(eii, eii)).InnerStrideAtCompileTime, 0); VERIFY_EQ_INT( (A(eii, eii)).OuterStrideAtCompileTime, 0); VERIFY_IS_APPROX( A(seq(n-1,2,-2), seqN(n-1-6,3,-1)), A(seq(last,2,fix<-2>), seqN(last-6,3,fix<-1>)) ); VERIFY_IS_APPROX( A(seq(n-1,2,-2), seqN(n-1-6,4)), A(seq(last,2,-2), seqN(last-6,4)) ); VERIFY_IS_APPROX( A(seq(n-1-6,n-1-2), seqN(n-1-6,4)), A(seq(last-6,last-2), seqN(6+last-6-6,4)) ); VERIFY_IS_APPROX( A(seq((n-1)/2,(n)/2+3), seqN(2,4)), A(seq(last/2,(last+1)/2+3), seqN(last+2-last,4)) ); VERIFY_IS_APPROX( A(seq(n-2,2,-2), seqN(n-8,4)), A(seq(lastp1-2,2,-2), seqN(lastp1-8,4)) ); // Check all combinations of seq: VERIFY_IS_APPROX( A(seq(1,n-1-2,2), seq(1,n-1-2,2)), A(seq(1,last-2,2), seq(1,last-2,fix<2>)) ); VERIFY_IS_APPROX( A(seq(n-1-5,n-1-2,2), seq(n-1-5,n-1-2,2)), A(seq(last-5,last-2,2), seq(last-5,last-2,fix<2>)) ); VERIFY_IS_APPROX( A(seq(n-1-5,7,2), seq(n-1-5,7,2)), A(seq(last-5,7,2), seq(last-5,7,fix<2>)) ); VERIFY_IS_APPROX( A(seq(1,n-1-2), seq(n-1-5,7)), A(seq(1,last-2), seq(last-5,7)) ); VERIFY_IS_APPROX( A(seq(n-1-5,n-1-2), seq(n-1-5,n-1-2)), A(seq(last-5,last-2), seq(last-5,last-2)) ); VERIFY_IS_APPROX( A.col(A.cols()-1), A(all,last) ); VERIFY_IS_APPROX( A(A.rows()-2, A.cols()/2), A(last-1, lastp1/2) ); VERIFY_IS_APPROX( a(a.size()-2), a(last-1) ); VERIFY_IS_APPROX( a(a.size()/2), a((last+1)/2) ); // Check fall-back to Block { VERIFY( is_same_eq(A.col(0), A(all,0)) ); VERIFY( is_same_eq(A.row(0), A(0,all)) ); VERIFY( is_same_eq(A.block(0,0,2,2), A(seqN(0,2),seq(0,1))) ); VERIFY( is_same_eq(A.middleRows(2,4), A(seqN(2,4),all)) 
); VERIFY( is_same_eq(A.middleCols(2,4), A(all,seqN(2,4))) ); VERIFY( is_same_eq(A.col(A.cols()-1), A(all,last)) ); const ArrayXXi& cA(A); VERIFY( is_same_eq(cA.col(0), cA(all,0)) ); VERIFY( is_same_eq(cA.row(0), cA(0,all)) ); VERIFY( is_same_eq(cA.block(0,0,2,2), cA(seqN(0,2),seq(0,1))) ); VERIFY( is_same_eq(cA.middleRows(2,4), cA(seqN(2,4),all)) ); VERIFY( is_same_eq(cA.middleCols(2,4), cA(all,seqN(2,4))) ); VERIFY( is_same_eq(a.head(4), a(seq(0,3))) ); VERIFY( is_same_eq(a.tail(4), a(seqN(last-3,4))) ); VERIFY( is_same_eq(a.tail(4), a(seq(lastp1-4,last))) ); VERIFY( is_same_eq(a.segment<4>(3), a(seqN(3,fix<4>))) ); } ArrayXXi A1=A, A2 = ArrayXXi::Random(4,4); ArrayXi range25(4); range25 << 3,2,4,5; A1(seqN(3,4),seq(2,5)) = A2; VERIFY_IS_APPROX( A1.block(3,2,4,4), A2 ); A1 = A; A2.setOnes(); A1(seq(6,3,-1),range25) = A2; VERIFY_IS_APPROX( A1.block(3,2,4,4), A2 ); // check reverse { VERIFY( is_same_seq_type( seq(3,7).reverse(), seqN(7,5,fix<-1>) ) ); VERIFY( is_same_seq_type( seq(7,3,fix<-2>).reverse(), seqN(3,3,fix<2>) ) ); VERIFY_IS_APPROX( a(seqN(2,last/2).reverse()), a(seqN(2+(last/2-1)*1,last/2,fix<-1>)) ); VERIFY_IS_APPROX( a(seqN(last/2,fix<4>).reverse()),a(seqN(last/2,fix<4>)).reverse() ); VERIFY_IS_APPROX( A(seq(last-5,last-1,2).reverse(), seqN(last-3,3,fix<-2>).reverse()), A(seq(last-5,last-1,2), seqN(last-3,3,fix<-2>)).reverse() ); } #if EIGEN_HAS_CXX11 // check lastN VERIFY_IS_APPROX( a(lastN(3)), a.tail(3) ); VERIFY( MATCH( a(lastN(3)), "7\n8\n9" ) ); VERIFY_IS_APPROX( a(lastN(fix<3>())), a.tail<3>() ); VERIFY( MATCH( a(lastN(3,2)), "5\n7\n9" ) ); VERIFY( MATCH( a(lastN(3,fix<2>())), "5\n7\n9" ) ); VERIFY( a(lastN(fix<3>())).SizeAtCompileTime == 3 ); VERIFY( (A(all, std::array{{1,3,2,4}})).ColsAtCompileTime == 4); VERIFY_IS_APPROX( (A(std::array{{1,3,5}}, std::array{{9,6,3,0}})), A(seqN(1,3,2), seqN(9,4,-3)) ); #if EIGEN_HAS_STATIC_ARRAY_TEMPLATE VERIFY_IS_APPROX( A({3, 1, 6, 5}, all), A(std::array{{3, 1, 6, 5}}, all) ); VERIFY_IS_APPROX( A(all,{3, 1, 
6, 5}), A(all,std::array{{3, 1, 6, 5}}) ); VERIFY_IS_APPROX( A({1,3,5},{3, 1, 6, 5}), A(std::array{{1,3,5}},std::array{{3, 1, 6, 5}}) ); VERIFY_IS_EQUAL( A({1,3,5},{3, 1, 6, 5}).RowsAtCompileTime, 3 ); VERIFY_IS_EQUAL( A({1,3,5},{3, 1, 6, 5}).ColsAtCompileTime, 4 ); VERIFY_IS_APPROX( a({3, 1, 6, 5}), a(std::array{{3, 1, 6, 5}}) ); VERIFY_IS_EQUAL( a({1,3,5}).SizeAtCompileTime, 3 ); VERIFY_IS_APPROX( b({3, 1, 6, 5}), b(std::array{{3, 1, 6, 5}}) ); VERIFY_IS_EQUAL( b({1,3,5}).SizeAtCompileTime, 3 ); #endif #endif // check mat(i,j) with weird types for i and j { VERIFY_IS_APPROX( A(B.RowsAtCompileTime-1, 1), A(3,1) ); VERIFY_IS_APPROX( A(B.RowsAtCompileTime, 1), A(4,1) ); VERIFY_IS_APPROX( A(B.RowsAtCompileTime-1, B.ColsAtCompileTime-1), A(3,3) ); VERIFY_IS_APPROX( A(B.RowsAtCompileTime, B.ColsAtCompileTime), A(4,4) ); const Index I_ = 3, J_ = 4; VERIFY_IS_APPROX( A(I_,J_), A(3,4) ); } // check extended block API { VERIFY( is_same_eq( A.block<3,4>(1,1), A.block(1,1,fix<3>,fix<4>)) ); VERIFY( is_same_eq( A.block<3,4>(1,1,3,4), A.block(1,1,fix<3>(),fix<4>(4))) ); VERIFY( is_same_eq( A.block<3,Dynamic>(1,1,3,4), A.block(1,1,fix<3>,4)) ); VERIFY( is_same_eq( A.block(1,1,3,4), A.block(1,1,fix(3),fix<4>)) ); VERIFY( is_same_eq( A.block(1,1,3,4), A.block(1,1,fix(3),fix(4))) ); VERIFY( is_same_eq( A.topLeftCorner<3,4>(), A.topLeftCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( A.bottomLeftCorner<3,4>(), A.bottomLeftCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( A.bottomRightCorner<3,4>(), A.bottomRightCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( A.topRightCorner<3,4>(), A.topRightCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( A.leftCols<3>(), A.leftCols(fix<3>)) ); VERIFY( is_same_eq( A.rightCols<3>(), A.rightCols(fix<3>)) ); VERIFY( is_same_eq( A.middleCols<3>(1), A.middleCols(1,fix<3>)) ); VERIFY( is_same_eq( A.topRows<3>(), A.topRows(fix<3>)) ); VERIFY( is_same_eq( A.bottomRows<3>(), A.bottomRows(fix<3>)) ); VERIFY( is_same_eq( A.middleRows<3>(1), A.middleRows(1,fix<3>)) ); 
VERIFY( is_same_eq( a.segment<3>(1), a.segment(1,fix<3>)) ); VERIFY( is_same_eq( a.head<3>(), a.head(fix<3>)) ); VERIFY( is_same_eq( a.tail<3>(), a.tail(fix<3>)) ); const ArrayXXi& cA(A); VERIFY( is_same_eq( cA.block(1,1,3,4), cA.block(1,1,fix(3),fix<4>)) ); VERIFY( is_same_eq( cA.topLeftCorner<3,4>(), cA.topLeftCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( cA.bottomLeftCorner<3,4>(), cA.bottomLeftCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( cA.bottomRightCorner<3,4>(), cA.bottomRightCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( cA.topRightCorner<3,4>(), cA.topRightCorner(fix<3>,fix<4>)) ); VERIFY( is_same_eq( cA.leftCols<3>(), cA.leftCols(fix<3>)) ); VERIFY( is_same_eq( cA.rightCols<3>(), cA.rightCols(fix<3>)) ); VERIFY( is_same_eq( cA.middleCols<3>(1), cA.middleCols(1,fix<3>)) ); VERIFY( is_same_eq( cA.topRows<3>(), cA.topRows(fix<3>)) ); VERIFY( is_same_eq( cA.bottomRows<3>(), cA.bottomRows(fix<3>)) ); VERIFY( is_same_eq( cA.middleRows<3>(1), cA.middleRows(1,fix<3>)) ); } // Check compilation of enums as index type: a(XX) = 1; A(XX,YY) = 1; // Anonymous enums only work with C++11 #if EIGEN_HAS_CXX11 enum { X=0, Y=1 }; a(X) = 1; A(X,Y) = 1; A(XX,Y) = 1; A(X,YY) = 1; #endif // Check compilation of varying integer types as index types: Index i = n/2; short i_short(i); std::size_t i_sizet(i); VERIFY_IS_EQUAL( a(i), a.coeff(i_short) ); VERIFY_IS_EQUAL( a(i), a.coeff(i_sizet) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i_short) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i_short, i) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_short) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i, i_sizet) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(i_sizet, i_short) ); VERIFY_IS_EQUAL( A(i,i), A.coeff(5, i_sizet) ); // Regression test for Max{Rows,Cols}AtCompileTime { Matrix3i A3 = Matrix3i::Random(); ArrayXi ind(5); ind << 1,1,1,1,1; VERIFY_IS_EQUAL( A3(ind,ind).eval(), MatrixXi::Constant(5,5,A3(1,1)) ); } // Regression for bug 1736 { VERIFY_IS_APPROX(A(all, 
eii).col(0).eval(), A.col(eii(0))); A(all, eii).col(0) = A.col(eii(0)); } // bug 1815: IndexedView should allow linear access { VERIFY( MATCH( b(eii)(0), "3" ) ); VERIFY( MATCH( a(eii)(0), "3" ) ); VERIFY( MATCH( A(1,eii)(0), "103")); VERIFY( MATCH( A(eii,1)(0), "301")); VERIFY( MATCH( A(1,all)(1), "101")); VERIFY( MATCH( A(all,1)(1), "101")); } #if EIGEN_HAS_CXX11 //Bug IndexView with a single static row should be RowMajor: { // A(1, seq(0,2,1)).cwiseAbs().colwise().replicate(2).eval(); STATIC_CHECK(( (internal::evaluator::Flags & RowMajorBit) == RowMajorBit )); } #endif } EIGEN_DECLARE_TEST(indexed_view) { // for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( check_indexed_view() ); CALL_SUBTEST_2( check_indexed_view() ); CALL_SUBTEST_3( check_indexed_view() ); // } // static checks of some internals: STATIC_CHECK(( internal::is_valid_index_type::value )); STATIC_CHECK(( internal::is_valid_index_type::value )); STATIC_CHECK(( internal::is_valid_index_type::value )); STATIC_CHECK(( internal::is_valid_index_type::value )); STATIC_CHECK(( internal::is_valid_index_type::value )); STATIC_CHECK(( !internal::valid_indexed_view_overload::value )); STATIC_CHECK(( !internal::valid_indexed_view_overload::value )); STATIC_CHECK(( !internal::valid_indexed_view_overload::value )); STATIC_CHECK(( !internal::valid_indexed_view_overload::value )); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/initializer_list_construction.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2019 David Tellenbach // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#if defined(__GNUC__) && __GNUC__ >= 10 // GCC 10+ has a bug for unsigned char that thinks we're writing past the // end of an array when compiled with -O3. This warning is not triggered for // any other types, nor for other compilers, nor for other optimization levels. #pragma GCC diagnostic ignored "-Wstringop-overflow" #endif #include "main.h" template::IsInteger> struct TestMethodDispatching { static void run() {} }; template struct TestMethodDispatching { static void run() { { Matrix m {3, 4}; Array a {3, 4}; VERIFY(m.rows() == 3); VERIFY(m.cols() == 4); VERIFY(a.rows() == 3); VERIFY(a.cols() == 4); } { Matrix m {3, 4}; Array a {3, 4}; VERIFY(m(0) == 3); VERIFY(m(1) == 4); VERIFY(a(0) == 3); VERIFY(a(1) == 4); } { Matrix m {3, 4}; Array a {3, 4}; VERIFY(m(0) == 3); VERIFY(m(1) == 4); VERIFY(a(0) == 3); VERIFY(a(1) == 4); } } }; template void fixedsizeVariadicVectorConstruction2() { { Vec4 ref = Vec4::Random(); Vec4 v{ ref[0], ref[1], ref[2], ref[3] }; VERIFY_IS_APPROX(v, ref); VERIFY_IS_APPROX(v, (Vec4( ref[0], ref[1], ref[2], ref[3] ))); VERIFY_IS_APPROX(v, (Vec4({ref[0], ref[1], ref[2], ref[3]}))); Vec4 v2 = { ref[0], ref[1], ref[2], ref[3] }; VERIFY_IS_APPROX(v2, ref); } { Vec5 ref = Vec5::Random(); Vec5 v{ ref[0], ref[1], ref[2], ref[3], ref[4] }; VERIFY_IS_APPROX(v, ref); VERIFY_IS_APPROX(v, (Vec5( ref[0], ref[1], ref[2], ref[3], ref[4] ))); VERIFY_IS_APPROX(v, (Vec5({ref[0], ref[1], ref[2], ref[3], ref[4]}))); Vec5 v2 = { ref[0], ref[1], ref[2], ref[3], ref[4] }; VERIFY_IS_APPROX(v2, ref); } } #define CHECK_MIXSCALAR_V5_APPROX(V, A0, A1, A2, A3, A4) { \ VERIFY_IS_APPROX(V[0], Scalar(A0) ); \ VERIFY_IS_APPROX(V[1], Scalar(A1) ); \ VERIFY_IS_APPROX(V[2], Scalar(A2) ); \ VERIFY_IS_APPROX(V[3], Scalar(A3) ); \ VERIFY_IS_APPROX(V[4], Scalar(A4) ); \ } #define CHECK_MIXSCALAR_V5(VEC5, A0, A1, A2, A3, A4) { \ typedef VEC5::Scalar Scalar; \ VEC5 v = { A0 , A1 , A2 , A3 , A4 }; \ CHECK_MIXSCALAR_V5_APPROX(v, A0 , A1 , A2 , A3 , A4); \ } template void 
fixedsizeVariadicVectorConstruction3() { typedef Matrix Vec5; typedef Array Arr5; CHECK_MIXSCALAR_V5(Vec5, 1, 2., -3, 4.121, 5.53252); CHECK_MIXSCALAR_V5(Arr5, 1, 2., 3.12f, 4.121, 5.53252); } template void fixedsizeVariadicVectorConstruction() { CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2, Matrix >() )); CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2, Matrix >() )); CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2, Array >() )); CALL_SUBTEST(( fixedsizeVariadicVectorConstruction2, Array >() )); } template void initializerListVectorConstruction() { Scalar raw[4]; for(int k = 0; k < 4; ++k) { raw[k] = internal::random(); } { Matrix m { {raw[0]}, {raw[1]},{raw[2]},{raw[3]} }; Array a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; for(int k = 0; k < 4; ++k) { VERIFY(m(k) == raw[k]); } for(int k = 0; k < 4; ++k) { VERIFY(a(k) == raw[k]); } VERIFY_IS_EQUAL(m, (Matrix({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))); VERIFY((a == (Array({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all()); } { Matrix m { {raw[0], raw[1], raw[2], raw[3]} }; Array a { {raw[0], raw[1], raw[2], raw[3]} }; for(int k = 0; k < 4; ++k) { VERIFY(m(k) == raw[k]); } for(int k = 0; k < 4; ++k) { VERIFY(a(k) == raw[k]); } VERIFY_IS_EQUAL(m, (Matrix({{raw[0],raw[1],raw[2],raw[3]}}))); VERIFY((a == (Array({{raw[0],raw[1],raw[2],raw[3]}}))).all()); } { Matrix m { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; Array a { {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }; for(int k=0; k < 4; ++k) { VERIFY(m(k) == raw[k]); } for(int k=0; k < 4; ++k) { VERIFY(a(k) == raw[k]); } VERIFY_IS_EQUAL(m, (Matrix({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))); VERIFY((a == (Array({ {raw[0]}, {raw[1]}, {raw[2]}, {raw[3]} }))).all()); } { Matrix m {{raw[0],raw[1],raw[2],raw[3]}}; Array a {{raw[0],raw[1],raw[2],raw[3]}}; for(int k=0; k < 4; ++k) { VERIFY(m(k) == raw[k]); } for(int k=0; k < 4; ++k) { VERIFY(a(k) == raw[k]); } VERIFY_IS_EQUAL(m, (Matrix({{raw[0],raw[1],raw[2],raw[3]}}))); VERIFY((a == 
(Array({{raw[0],raw[1],raw[2],raw[3]}}))).all()); } } template void initializerListMatrixConstruction() { const Index RowsAtCompileTime = 5; const Index ColsAtCompileTime = 4; const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime; Scalar raw[SizeAtCompileTime]; for (int i = 0; i < SizeAtCompileTime; ++i) { raw[i] = internal::random(); } { Matrix m {}; VERIFY(m.cols() == 0); VERIFY(m.rows() == 0); VERIFY_IS_EQUAL(m, (Matrix())); } { Matrix m { {raw[0], raw[1], raw[2], raw[3]}, {raw[4], raw[5], raw[6], raw[7]}, {raw[8], raw[9], raw[10], raw[11]}, {raw[12], raw[13], raw[14], raw[15]}, {raw[16], raw[17], raw[18], raw[19]} }; Matrix m2; m2 << raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6], raw[7], raw[8], raw[9], raw[10], raw[11], raw[12], raw[13], raw[14], raw[15], raw[16], raw[17], raw[18], raw[19]; int k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { VERIFY(m(i, j) == raw[k]); ++k; } } VERIFY_IS_EQUAL(m, m2); } { Matrix m{ {raw[0], raw[1], raw[2], raw[3]}, {raw[4], raw[5], raw[6], raw[7]}, {raw[8], raw[9], raw[10], raw[11]}, {raw[12], raw[13], raw[14], raw[15]}, {raw[16], raw[17], raw[18], raw[19]} }; VERIFY(m.cols() == 4); VERIFY(m.rows() == 5); int k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { VERIFY(m(i, j) == raw[k]); ++k; } } Matrix m2(RowsAtCompileTime, ColsAtCompileTime); k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { m2(i, j) = raw[k]; ++k; } } VERIFY_IS_EQUAL(m, m2); } } template void initializerListArrayConstruction() { const Index RowsAtCompileTime = 5; const Index ColsAtCompileTime = 4; const Index SizeAtCompileTime = RowsAtCompileTime * ColsAtCompileTime; Scalar raw[SizeAtCompileTime]; for (int i = 0; i < SizeAtCompileTime; ++i) { raw[i] = internal::random(); } { Array a {}; VERIFY(a.cols() == 0); VERIFY(a.rows() == 0); } { Array m { {raw[0], raw[1], raw[2], raw[3]}, {raw[4], 
raw[5], raw[6], raw[7]}, {raw[8], raw[9], raw[10], raw[11]}, {raw[12], raw[13], raw[14], raw[15]}, {raw[16], raw[17], raw[18], raw[19]} }; Array m2; m2 << raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6], raw[7], raw[8], raw[9], raw[10], raw[11], raw[12], raw[13], raw[14], raw[15], raw[16], raw[17], raw[18], raw[19]; int k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { VERIFY(m(i, j) == raw[k]); ++k; } } VERIFY_IS_APPROX(m, m2); } { Array m { {raw[0], raw[1], raw[2], raw[3]}, {raw[4], raw[5], raw[6], raw[7]}, {raw[8], raw[9], raw[10], raw[11]}, {raw[12], raw[13], raw[14], raw[15]}, {raw[16], raw[17], raw[18], raw[19]} }; VERIFY(m.cols() == 4); VERIFY(m.rows() == 5); int k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { VERIFY(m(i, j) == raw[k]); ++k; } } Array m2(RowsAtCompileTime, ColsAtCompileTime); k = 0; for(int i = 0; i < RowsAtCompileTime; ++i) { for (int j = 0; j < ColsAtCompileTime; ++j) { m2(i, j) = raw[k]; ++k; } } VERIFY_IS_APPROX(m, m2); } } template void dynamicVectorConstruction() { const Index size = 4; Scalar raw[size]; for (int i = 0; i < size; ++i) { raw[i] = internal::random(); } typedef Matrix VectorX; { VectorX v {{raw[0], raw[1], raw[2], raw[3]}}; for (int i = 0; i < size; ++i) { VERIFY(v(i) == raw[i]); } VERIFY(v.rows() == size); VERIFY(v.cols() == 1); VERIFY_IS_EQUAL(v, (VectorX {{raw[0], raw[1], raw[2], raw[3]}})); } } EIGEN_DECLARE_TEST(initializer_list_construction) { CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction()); CALL_SUBTEST_1(initializerListVectorConstruction>()); CALL_SUBTEST_1(initializerListVectorConstruction>()); CALL_SUBTEST_2(initializerListMatrixConstruction()); 
CALL_SUBTEST_2(initializerListMatrixConstruction()); CALL_SUBTEST_2(initializerListMatrixConstruction()); CALL_SUBTEST_2(initializerListMatrixConstruction()); CALL_SUBTEST_2(initializerListMatrixConstruction()); CALL_SUBTEST_2(initializerListMatrixConstruction()); CALL_SUBTEST_2(initializerListMatrixConstruction>()); CALL_SUBTEST_2(initializerListMatrixConstruction>()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction()); CALL_SUBTEST_3(initializerListArrayConstruction>()); CALL_SUBTEST_3(initializerListArrayConstruction>()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction>()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction>()); CALL_SUBTEST_4(fixedsizeVariadicVectorConstruction3<0>()); CALL_SUBTEST_5(TestMethodDispatching::run()); CALL_SUBTEST_5(TestMethodDispatching::run()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction()); CALL_SUBTEST_6(dynamicVectorConstruction>()); CALL_SUBTEST_6(dynamicVectorConstruction>()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/inplace_decomposition.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library 
// for linear algebra. // // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include #include #include // This file test inplace decomposition through Ref<>, as supported by Cholesky, LU, and QR decompositions. template void inplace(bool square = false, bool SPD = false) { typedef typename MatrixType::Scalar Scalar; typedef Matrix RhsType; typedef Matrix ResType; Index rows = MatrixType::RowsAtCompileTime==Dynamic ? internal::random(2,EIGEN_TEST_MAX_SIZE/2) : Index(MatrixType::RowsAtCompileTime); Index cols = MatrixType::ColsAtCompileTime==Dynamic ? (square?rows:internal::random(2,rows)) : Index(MatrixType::ColsAtCompileTime); MatrixType A = MatrixType::Random(rows,cols); RhsType b = RhsType::Random(rows); ResType x(cols); if(SPD) { assert(square); A.topRows(cols) = A.topRows(cols).adjoint() * A.topRows(cols); A.diagonal().array() += 1e-3; } MatrixType A0 = A; MatrixType A1 = A; DecType dec(A); // Check that the content of A has been modified VERIFY_IS_NOT_APPROX( A, A0 ); // Check that the decomposition is correct: if(rows==cols) { VERIFY_IS_APPROX( A0 * (x = dec.solve(b)), b ); } else { VERIFY_IS_APPROX( A0.transpose() * A0 * (x = dec.solve(b)), A0.transpose() * b ); } // Check that modifying A breaks the current dec: A.setRandom(); if(rows==cols) { VERIFY_IS_NOT_APPROX( A0 * (x = dec.solve(b)), b ); } else { VERIFY_IS_NOT_APPROX( A0.transpose() * A0 * (x = dec.solve(b)), A0.transpose() * b ); } // Check that calling compute(A1) does not modify A1: A = A0; dec.compute(A1); VERIFY_IS_EQUAL(A0,A1); VERIFY_IS_NOT_APPROX( A, A0 ); if(rows==cols) { VERIFY_IS_APPROX( A0 * (x = dec.solve(b)), b ); } else { VERIFY_IS_APPROX( A0.transpose() * A0 * (x = dec.solve(b)), A0.transpose() * b ); } } EIGEN_DECLARE_TEST(inplace_decomposition) { EIGEN_UNUSED typedef Matrix 
Matrix43d; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( inplace >, MatrixXd>(true,true) )); CALL_SUBTEST_1(( inplace >, Matrix4d>(true,true) )); CALL_SUBTEST_2(( inplace >, MatrixXd>(true,true) )); CALL_SUBTEST_2(( inplace >, Matrix4d>(true,true) )); CALL_SUBTEST_3(( inplace >, MatrixXd>(true,false) )); CALL_SUBTEST_3(( inplace >, Matrix4d>(true,false) )); CALL_SUBTEST_4(( inplace >, MatrixXd>(true,false) )); CALL_SUBTEST_4(( inplace >, Matrix4d>(true,false) )); CALL_SUBTEST_5(( inplace >, MatrixXd>(false,false) )); CALL_SUBTEST_5(( inplace >, Matrix43d>(false,false) )); CALL_SUBTEST_6(( inplace >, MatrixXd>(false,false) )); CALL_SUBTEST_6(( inplace >, Matrix43d>(false,false) )); CALL_SUBTEST_7(( inplace >, MatrixXd>(false,false) )); CALL_SUBTEST_7(( inplace >, Matrix43d>(false,false) )); CALL_SUBTEST_8(( inplace >, MatrixXd>(false,false) )); CALL_SUBTEST_8(( inplace >, Matrix43d>(false,false) )); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/integer_types.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #undef VERIFY_IS_APPROX #define VERIFY_IS_APPROX(a, b) VERIFY((a)==(b)); #undef VERIFY_IS_NOT_APPROX #define VERIFY_IS_NOT_APPROX(a, b) VERIFY((a)!=(b)); template void signed_integer_type_tests(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; enum { is_signed = (Scalar(-1) > Scalar(0)) ? 
0 : 1 }; VERIFY(is_signed == 1); Index rows = m.rows(); Index cols = m.cols(); MatrixType m1(rows, cols), m2 = MatrixType::Random(rows, cols), mzero = MatrixType::Zero(rows, cols); do { m1 = MatrixType::Random(rows, cols); } while(m1 == mzero || m1 == m2); // check linear structure Scalar s1; do { s1 = internal::random(); } while(s1 == 0); VERIFY_IS_EQUAL(-(-m1), m1); VERIFY_IS_EQUAL(-m2+m1+m2, m1); VERIFY_IS_EQUAL((-m1+m2)*s1, -s1*m1+s1*m2); } template void integer_type_tests(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; VERIFY(NumTraits::IsInteger); enum { is_signed = (Scalar(-1) > Scalar(0)) ? 0 : 1 }; VERIFY(int(NumTraits::IsSigned) == is_signed); typedef Matrix VectorType; Index rows = m.rows(); Index cols = m.cols(); // this test relies a lot on Random.h, and there's not much more that we can do // to test it, hence I consider that we will have tested Random.h MatrixType m1(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), mzero = MatrixType::Zero(rows, cols); typedef Matrix SquareMatrixType; SquareMatrixType identity = SquareMatrixType::Identity(rows, rows), square = SquareMatrixType::Random(rows, rows); VectorType v1(rows), v2 = VectorType::Random(rows), vzero = VectorType::Zero(rows); do { m1 = MatrixType::Random(rows, cols); } while(m1 == mzero || m1 == m2); do { v1 = VectorType::Random(rows); } while(v1 == vzero || v1 == v2); VERIFY_IS_APPROX( v1, v1); VERIFY_IS_NOT_APPROX( v1, 2*v1); VERIFY_IS_APPROX( vzero, v1-v1); VERIFY_IS_APPROX( m1, m1); VERIFY_IS_NOT_APPROX( m1, 2*m1); VERIFY_IS_APPROX( mzero, m1-m1); VERIFY_IS_APPROX(m3 = m1,m1); MatrixType m4; VERIFY_IS_APPROX(m4 = m1,m1); m3.real() = m1.real(); VERIFY_IS_APPROX(static_cast(m3).real(), static_cast(m1).real()); VERIFY_IS_APPROX(static_cast(m3).real(), m1.real()); // check == / != operators VERIFY(m1==m1); VERIFY(m1!=m2); VERIFY(!(m1==m2)); VERIFY(!(m1!=m1)); m1 = m2; VERIFY(m1==m2); VERIFY(!(m1!=m2)); // check linear structure Scalar s1; do { s1 = 
internal::random(); } while(s1 == 0); VERIFY_IS_EQUAL(m1+m1, 2*m1); VERIFY_IS_EQUAL(m1+m2-m1, m2); VERIFY_IS_EQUAL(m1*s1, s1*m1); VERIFY_IS_EQUAL((m1+m2)*s1, s1*m1+s1*m2); m3 = m2; m3 += m1; VERIFY_IS_EQUAL(m3, m1+m2); m3 = m2; m3 -= m1; VERIFY_IS_EQUAL(m3, m2-m1); m3 = m2; m3 *= s1; VERIFY_IS_EQUAL(m3, s1*m2); // check matrix product. VERIFY_IS_APPROX(identity * m1, m1); VERIFY_IS_APPROX(square * (m1 + m2), square * m1 + square * m2); VERIFY_IS_APPROX((m1 + m2).transpose() * square, m1.transpose() * square + m2.transpose() * square); VERIFY_IS_APPROX((m1 * m2.transpose()) * m1, m1 * (m2.transpose() * m1)); } template void integer_types_extra() { VERIFY_IS_EQUAL(int(internal::scalar_div_cost::value), 8); VERIFY_IS_EQUAL(int(internal::scalar_div_cost::value), 8); if(sizeof(long)>sizeof(int)) { VERIFY(int(internal::scalar_div_cost::value) > int(internal::scalar_div_cost::value)); VERIFY(int(internal::scalar_div_cost::value) > int(internal::scalar_div_cost::value)); } } EIGEN_DECLARE_TEST(integer_types) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( integer_type_tests(Matrix()) ); CALL_SUBTEST_1( integer_type_tests(Matrix()) ); CALL_SUBTEST_2( integer_type_tests(Matrix()) ); CALL_SUBTEST_2( signed_integer_type_tests(Matrix()) ); CALL_SUBTEST_3( integer_type_tests(Matrix(2, 10)) ); CALL_SUBTEST_3( signed_integer_type_tests(Matrix(2, 10)) ); CALL_SUBTEST_4( integer_type_tests(Matrix()) ); CALL_SUBTEST_4( integer_type_tests(Matrix(20, 20)) ); CALL_SUBTEST_5( integer_type_tests(Matrix(7, 4)) ); CALL_SUBTEST_5( signed_integer_type_tests(Matrix(7, 4)) ); CALL_SUBTEST_6( integer_type_tests(Matrix()) ); #if EIGEN_HAS_CXX11 CALL_SUBTEST_7( integer_type_tests(Matrix()) ); CALL_SUBTEST_7( signed_integer_type_tests(Matrix()) ); CALL_SUBTEST_8( integer_type_tests(Matrix(1, 5)) ); #endif } CALL_SUBTEST_9( integer_types_extra<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/inverse.cpp 
================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include template void inverse_for_fixed_size(const MatrixType&, typename internal::enable_if::type* = 0) { } template void inverse_for_fixed_size(const MatrixType& m1, typename internal::enable_if::type* = 0) { using std::abs; MatrixType m2, identity = MatrixType::Identity(); typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix VectorType; //computeInverseAndDetWithCheck tests //First: an invertible matrix bool invertible; Scalar det; m2.setZero(); m1.computeInverseAndDetWithCheck(m2, det, invertible); VERIFY(invertible); VERIFY_IS_APPROX(identity, m1*m2); VERIFY_IS_APPROX(det, m1.determinant()); m2.setZero(); m1.computeInverseWithCheck(m2, invertible); VERIFY(invertible); VERIFY_IS_APPROX(identity, m1*m2); //Second: a rank one matrix (not invertible, except for 1x1 matrices) VectorType v3 = VectorType::Random(); MatrixType m3 = v3*v3.transpose(), m4; m3.computeInverseAndDetWithCheck(m4, det, invertible); VERIFY( m1.rows()==1 ? invertible : !invertible ); VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1)); m3.computeInverseWithCheck(m4, invertible); VERIFY( m1.rows()==1 ? 
invertible : !invertible ); // check with submatrices { Matrix m5; m5.setRandom(); m5.topLeftCorner(m1.rows(),m1.rows()) = m1; m2 = m5.template topLeftCorner().inverse(); VERIFY_IS_APPROX( (m5.template topLeftCorner()), m2.inverse() ); } } template void inverse(const MatrixType& m) { /* this test covers the following files: Inverse.h */ Index rows = m.rows(); Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; MatrixType m1(rows, cols), m2(rows, cols), identity = MatrixType::Identity(rows, rows); createRandomPIMatrixOfRank(rows,rows,rows,m1); m2 = m1.inverse(); VERIFY_IS_APPROX(m1, m2.inverse() ); VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5)); VERIFY_IS_APPROX(identity, m1.inverse() * m1 ); VERIFY_IS_APPROX(identity, m1 * m1.inverse() ); VERIFY_IS_APPROX(m1, m1.inverse().inverse() ); // since for the general case we implement separately row-major and col-major, test that VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose())); inverse_for_fixed_size(m1); // check in-place inversion if(MatrixType::RowsAtCompileTime>=2 && MatrixType::RowsAtCompileTime<=4) { // in-place is forbidden VERIFY_RAISES_ASSERT(m1 = m1.inverse()); } else { m2 = m1.inverse(); m1 = m1.inverse(); VERIFY_IS_APPROX(m1,m2); } } template void inverse_zerosized() { Matrix A(0,0); { Matrix b, x; x = A.inverse() * b; } { Matrix b(0,1), x; x = A.inverse() * b; VERIFY_IS_EQUAL(x.rows(), 0); VERIFY_IS_EQUAL(x.cols(), 1); } } EIGEN_DECLARE_TEST(inverse) { int s = 0; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( inverse(Matrix()) ); CALL_SUBTEST_2( inverse(Matrix2d()) ); CALL_SUBTEST_3( inverse(Matrix3f()) ); CALL_SUBTEST_4( inverse(Matrix4f()) ); CALL_SUBTEST_4( inverse(Matrix()) ); s = internal::random(50,320); CALL_SUBTEST_5( inverse(MatrixXf(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) CALL_SUBTEST_5( inverse_zerosized() ); CALL_SUBTEST_5( inverse(MatrixXf(0, 0)) ); CALL_SUBTEST_5( inverse(MatrixXf(1, 1)) ); s = 
internal::random(25,100); CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) CALL_SUBTEST_7( inverse(Matrix4d()) ); CALL_SUBTEST_7( inverse(Matrix()) ); CALL_SUBTEST_8( inverse(Matrix4cd()) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/io.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2019 Joel Holdsworth // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include "main.h" template struct check_ostream_impl { static void run() { const Array array(123); std::ostringstream ss; ss << array; VERIFY(ss.str() == "123"); check_ostream_impl< std::complex >::run(); }; }; template<> struct check_ostream_impl { static void run() { const Array array(1, 0); std::ostringstream ss; ss << array; VERIFY(ss.str() == "1 0"); }; }; template struct check_ostream_impl< std::complex > { static void run() { const Array,1,1> array(std::complex(12, 34)); std::ostringstream ss; ss << array; VERIFY(ss.str() == "(12,34)"); }; }; template static void check_ostream() { check_ostream_impl::run(); } EIGEN_DECLARE_TEST(rand) { CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); CALL_SUBTEST(check_ostream()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/is_same_dense.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2015 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" using internal::is_same_dense; EIGEN_DECLARE_TEST(is_same_dense) { typedef Matrix ColMatrixXd; typedef Matrix,Dynamic,Dynamic,ColMajor> ColMatrixXcd; ColMatrixXd m1(10,10); ColMatrixXcd m2(10,10); Ref ref_m1(m1); Ref > ref_m2_real(m2.real()); Ref const_ref_m1(m1); VERIFY(is_same_dense(m1,m1)); VERIFY(is_same_dense(m1,ref_m1)); VERIFY(is_same_dense(const_ref_m1,m1)); VERIFY(is_same_dense(const_ref_m1,ref_m1)); VERIFY(is_same_dense(m1.block(0,0,m1.rows(),m1.cols()),m1)); VERIFY(!is_same_dense(m1.row(0),m1.col(0))); Ref const_ref_m1_row(m1.row(1)); VERIFY(!is_same_dense(m1.row(1),const_ref_m1_row)); Ref const_ref_m1_col(m1.col(1)); VERIFY(is_same_dense(m1.col(1),const_ref_m1_col)); VERIFY(!is_same_dense(m1, ref_m2_real)); VERIFY(!is_same_dense(m2, ref_m2_real)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/jacobi.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include template void jacobi(const MatrixType& m = MatrixType()) { Index rows = m.rows(); Index cols = m.cols(); enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime }; typedef Matrix JacobiVector; const MatrixType a(MatrixType::Random(rows, cols)); JacobiVector v = JacobiVector::Random().normalized(); JacobiScalar c = v.x(), s = v.y(); JacobiRotation rot(c, s); { Index p = internal::random(0, rows-1); Index q; do { q = internal::random(0, rows-1); } while (q == p); MatrixType b = a; b.applyOnTheLeft(p, q, rot); VERIFY_IS_APPROX(b.row(p), c * a.row(p) + numext::conj(s) * a.row(q)); VERIFY_IS_APPROX(b.row(q), -s * a.row(p) + numext::conj(c) * a.row(q)); } { Index p = internal::random(0, cols-1); Index q; do { q = internal::random(0, cols-1); } while (q == p); MatrixType b = a; b.applyOnTheRight(p, q, rot); VERIFY_IS_APPROX(b.col(p), c * a.col(p) - s * a.col(q)); VERIFY_IS_APPROX(b.col(q), numext::conj(s) * a.col(p) + numext::conj(c) * a.col(q)); } } EIGEN_DECLARE_TEST(jacobi) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(( jacobi() )); CALL_SUBTEST_2(( jacobi() )); CALL_SUBTEST_3(( jacobi() )); CALL_SUBTEST_3(( jacobi >() )); int r = internal::random(2, internal::random(1,EIGEN_TEST_MAX_SIZE)/2), c = internal::random(2, internal::random(1,EIGEN_TEST_MAX_SIZE)/2); CALL_SUBTEST_4(( jacobi(MatrixXf(r,c)) )); CALL_SUBTEST_5(( jacobi(MatrixXcd(r,c)) )); CALL_SUBTEST_5(( jacobi >(MatrixXcd(r,c)) )); // complex is really important to test as it is the only way to cover conjugation issues in certain unaligned paths CALL_SUBTEST_6(( jacobi(MatrixXcf(r,c)) )); CALL_SUBTEST_6(( jacobi >(MatrixXcf(r,c)) )); TEST_SET_BUT_UNUSED_VARIABLE(r); TEST_SET_BUT_UNUSED_VARIABLE(c); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/jacobisvd.cpp ================================================ // This file is part of Eigen, a lightweight C++ 
template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // discard stack allocation as that too bypasses malloc #define EIGEN_STACK_ALLOCATION_LIMIT 0 #define EIGEN_RUNTIME_NO_MALLOC #include "main.h" #include #define SVD_DEFAULT(M) JacobiSVD #define SVD_FOR_MIN_NORM(M) JacobiSVD #include "svd_common.h" // Check all variants of JacobiSVD template void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) { MatrixType m = a; if(pickrandom) svd_fill_random(m); CALL_SUBTEST(( svd_test_all_computation_options >(m, true) )); // check full only CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); if(m.rows()==m.cols()) CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); } template void jacobisvd_verify_assert(const MatrixType& m) { svd_verify_assert >(m); svd_verify_assert >(m, true); svd_verify_assert >(m); svd_verify_assert >(m); Index rows = m.rows(); Index cols = m.cols(); enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime }; MatrixType a = MatrixType::Zero(rows, cols); a.setZero(); if (ColsAtCompileTime == Dynamic) { JacobiSVD svd_fullqr; VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV)) VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV)) VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV)) } } template void jacobisvd_method() { enum { Size = MatrixType::RowsAtCompileTime }; typedef typename MatrixType::RealScalar RealScalar; typedef Matrix RealVecType; MatrixType m = MatrixType::Identity(); VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones()); VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU()); 
VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV()); VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m); VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } namespace Foo { // older compiler require a default constructor for Bar // cf: https://stackoverflow.com/questions/7411515/ class Bar {public: Bar() {}}; bool operator<(const Bar&, const Bar&) { return true; } } // regression test for a very strange MSVC issue for which simply // including SVDBase.h messes up with std::max and custom scalar type void msvc_workaround() { const Foo::Bar a; const Foo::Bar b; std::max EIGEN_NOT_A_MACRO (a,b); } EIGEN_DECLARE_TEST(jacobisvd) { CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) )); CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) )); CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) )); CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) )); CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd)); CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd)); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_3(( jacobisvd() )); CALL_SUBTEST_4(( jacobisvd() )); CALL_SUBTEST_5(( jacobisvd >() )); CALL_SUBTEST_6(( jacobisvd >(Matrix(10,2)) )); int r = internal::random(1, 30), c = internal::random(1, 30); TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(c) CALL_SUBTEST_10(( jacobisvd(MatrixXd(r,c)) )); CALL_SUBTEST_7(( jacobisvd(MatrixXf(r,c)) )); CALL_SUBTEST_8(( jacobisvd(MatrixXcd(r,c)) )); (void) r; (void) c; // Test on inf/nan matrix CALL_SUBTEST_7( (svd_inf_nan, MatrixXf>()) ); CALL_SUBTEST_10( (svd_inf_nan, MatrixXd>()) ); // bug1395 test compile-time vectors as input CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix(r)) )); CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix(c)) )); } CALL_SUBTEST_7(( 
jacobisvd(MatrixXf(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_8(( jacobisvd(MatrixXcd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) )); // test matrixbase method CALL_SUBTEST_1(( jacobisvd_method() )); CALL_SUBTEST_3(( jacobisvd_method() )); // Test problem size constructors CALL_SUBTEST_7( JacobiSVD(10,10) ); // Check that preallocation avoids subsequent mallocs CALL_SUBTEST_9( svd_preallocate() ); CALL_SUBTEST_2( svd_underoverflow() ); msvc_workaround(); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/klu_support.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS #include "sparse_solver.h" #include template void test_klu_support_T() { KLU > klu_colmajor; KLU > klu_rowmajor; check_sparse_square_solving(klu_colmajor); check_sparse_square_solving(klu_rowmajor); //check_sparse_square_determinant(umfpack_colmajor); //check_sparse_square_determinant(umfpack_rowmajor); } EIGEN_DECLARE_TEST(klu_support) { CALL_SUBTEST_1(test_klu_support_T()); CALL_SUBTEST_2(test_klu_support_T >()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/linearstructure.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. static bool g_called; #define EIGEN_SCALAR_BINARY_OP_PLUGIN { g_called |= (!internal::is_same::value); } #include "main.h" template void linearStructure(const MatrixType& m) { using std::abs; /* this test covers the following files: CwiseUnaryOp.h, CwiseBinaryOp.h, SelfCwiseBinaryOp.h */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; Index rows = m.rows(); Index cols = m.cols(); // this test relies a lot on Random.h, and there's not much more that we can do // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols); Scalar s1 = internal::random(); while (abs(s1)(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); VERIFY_IS_APPROX(-(-m1), m1); VERIFY_IS_APPROX(m1+m1, 2*m1); VERIFY_IS_APPROX(m1+m2-m1, m2); VERIFY_IS_APPROX(-m2+m1+m2, m1); VERIFY_IS_APPROX(m1*s1, s1*m1); VERIFY_IS_APPROX((m1+m2)*s1, s1*m1+s1*m2); VERIFY_IS_APPROX((-m1+m2)*s1, -s1*m1+s1*m2); m3 = m2; m3 += m1; VERIFY_IS_APPROX(m3, m1+m2); m3 = m2; m3 -= m1; VERIFY_IS_APPROX(m3, m2-m1); m3 = m2; m3 *= s1; VERIFY_IS_APPROX(m3, s1*m2); if(!NumTraits::IsInteger) { m3 = m2; m3 /= s1; VERIFY_IS_APPROX(m3, m2/s1); } // again, test operator() to check const-qualification VERIFY_IS_APPROX((-m1)(r,c), -(m1(r,c))); VERIFY_IS_APPROX((m1-m2)(r,c), (m1(r,c))-(m2(r,c))); VERIFY_IS_APPROX((m1+m2)(r,c), (m1(r,c))+(m2(r,c))); VERIFY_IS_APPROX((s1*m1)(r,c), s1*(m1(r,c))); VERIFY_IS_APPROX((m1*s1)(r,c), (m1(r,c))*s1); if(!NumTraits::IsInteger) VERIFY_IS_APPROX((m1/s1)(r,c), (m1(r,c))/s1); // use .block to disable vectorization and compare to the vectorized version 
VERIFY_IS_APPROX(m1+m1.block(0,0,rows,cols), m1+m1); VERIFY_IS_APPROX(m1.cwiseProduct(m1.block(0,0,rows,cols)), m1.cwiseProduct(m1)); VERIFY_IS_APPROX(m1 - m1.block(0,0,rows,cols), m1 - m1); VERIFY_IS_APPROX(m1.block(0,0,rows,cols) * s1, m1 * s1); } // Make sure that complex * real and real * complex are properly optimized template void real_complex(DenseIndex rows = MatrixType::RowsAtCompileTime, DenseIndex cols = MatrixType::ColsAtCompileTime) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; RealScalar s = internal::random(); MatrixType m1 = MatrixType::Random(rows, cols); g_called = false; VERIFY_IS_APPROX(s*m1, Scalar(s)*m1); VERIFY(g_called && "real * matrix not properly optimized"); g_called = false; VERIFY_IS_APPROX(m1*s, m1*Scalar(s)); VERIFY(g_called && "matrix * real not properly optimized"); g_called = false; VERIFY_IS_APPROX(m1/s, m1/Scalar(s)); VERIFY(g_called && "matrix / real not properly optimized"); g_called = false; VERIFY_IS_APPROX(s+m1.array(), Scalar(s)+m1.array()); VERIFY(g_called && "real + matrix not properly optimized"); g_called = false; VERIFY_IS_APPROX(m1.array()+s, m1.array()+Scalar(s)); VERIFY(g_called && "matrix + real not properly optimized"); g_called = false; VERIFY_IS_APPROX(s-m1.array(), Scalar(s)-m1.array()); VERIFY(g_called && "real - matrix not properly optimized"); g_called = false; VERIFY_IS_APPROX(m1.array()-s, m1.array()-Scalar(s)); VERIFY(g_called && "matrix - real not properly optimized"); } template void linearstructure_overflow() { // make sure that /=scalar and /scalar do not overflow // rational: 1.0/4.94e-320 overflow, but m/4.94e-320 should not Matrix4d m2, m3; m3 = m2 = Matrix4d::Random()*1e-20; m2 = m2 / 4.9e-320; VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones()); m3 /= 4.9e-320; VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones()); } EIGEN_DECLARE_TEST(linearstructure) { g_called = true; VERIFY(g_called); // avoid `unneeded-internal-declaration` 
warning. for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( linearStructure(Matrix()) ); CALL_SUBTEST_2( linearStructure(Matrix2f()) ); CALL_SUBTEST_3( linearStructure(Vector3d()) ); CALL_SUBTEST_4( linearStructure(Matrix4d()) ); CALL_SUBTEST_5( linearStructure(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_6( linearStructure(MatrixXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_7( linearStructure(MatrixXi (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( linearStructure(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_9( linearStructure(ArrayXXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_10( linearStructure(ArrayXXcf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_11( real_complex() ); CALL_SUBTEST_11( real_complex(10,10) ); CALL_SUBTEST_11( real_complex(10,10) ); } CALL_SUBTEST_4( linearstructure_overflow<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/lscg.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "sparse_solver.h" #include template void test_lscg_T() { LeastSquaresConjugateGradient > lscg_colmajor_diag; LeastSquaresConjugateGradient, IdentityPreconditioner> lscg_colmajor_I; LeastSquaresConjugateGradient > lscg_rowmajor_diag; LeastSquaresConjugateGradient, IdentityPreconditioner> lscg_rowmajor_I; CALL_SUBTEST( check_sparse_square_solving(lscg_colmajor_diag) ); CALL_SUBTEST( check_sparse_square_solving(lscg_colmajor_I) ); CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_colmajor_diag) ); CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_colmajor_I) ); CALL_SUBTEST( check_sparse_square_solving(lscg_rowmajor_diag) ); CALL_SUBTEST( check_sparse_square_solving(lscg_rowmajor_I) ); CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_rowmajor_diag) ); CALL_SUBTEST( check_sparse_leastsquare_solving(lscg_rowmajor_I) ); } EIGEN_DECLARE_TEST(lscg) { CALL_SUBTEST_1(test_lscg_T()); CALL_SUBTEST_2(test_lscg_T >()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/lu.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #include #include "solverbase.h" using namespace std; template typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { return m.cwiseAbs().colwise().sum().maxCoeff(); } template void lu_non_invertible() { STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); typedef typename MatrixType::RealScalar RealScalar; /* this test covers the following files: LU.h */ Index rows, cols, cols2; if(MatrixType::RowsAtCompileTime==Dynamic) { rows = internal::random(2,EIGEN_TEST_MAX_SIZE); } else { rows = MatrixType::RowsAtCompileTime; } if(MatrixType::ColsAtCompileTime==Dynamic) { cols = internal::random(2,EIGEN_TEST_MAX_SIZE); cols2 = internal::random(2,EIGEN_TEST_MAX_SIZE); } else { cols2 = cols = MatrixType::ColsAtCompileTime; } enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime }; typedef typename internal::kernel_retval_base >::ReturnType KernelMatrixType; typedef typename internal::image_retval_base >::ReturnType ImageMatrixType; typedef Matrix CMatrixType; typedef Matrix RMatrixType; Index rank = internal::random(1, (std::min)(rows, cols)-1); // The image of the zero matrix should consist of a single (zero) column vector VERIFY((MatrixType::Zero(rows,cols).fullPivLu().image(MatrixType::Zero(rows,cols)).cols() == 1)); // The kernel of the zero matrix is the entire space, and thus is an invertible matrix of dimensions cols. KernelMatrixType kernel = MatrixType::Zero(rows,cols).fullPivLu().kernel(); VERIFY((kernel.fullPivLu().isInvertible())); MatrixType m1(rows, cols), m3(rows, cols2); CMatrixType m2(cols, cols2); createRandomPIMatrixOfRank(rank, rows, cols, m1); FullPivLU lu; // The special value 0.01 below works well in tests. Keep in mind that we're only computing the rank // of singular values are either 0 or 1. // So it's not clear at all that the epsilon should play any role there. 
lu.setThreshold(RealScalar(0.01)); lu.compute(m1); MatrixType u(rows,cols); u = lu.matrixLU().template triangularView(); RMatrixType l = RMatrixType::Identity(rows,rows); l.block(0,0,rows,(std::min)(rows,cols)).template triangularView() = lu.matrixLU().block(0,0,rows,(std::min)(rows,cols)); VERIFY_IS_APPROX(lu.permutationP() * m1 * lu.permutationQ(), l*u); KernelMatrixType m1kernel = lu.kernel(); ImageMatrixType m1image = lu.image(m1); VERIFY_IS_APPROX(m1, lu.reconstructedMatrix()); VERIFY(rank == lu.rank()); VERIFY(cols - lu.rank() == lu.dimensionOfKernel()); VERIFY(!lu.isInjective()); VERIFY(!lu.isInvertible()); VERIFY(!lu.isSurjective()); VERIFY_IS_MUCH_SMALLER_THAN((m1 * m1kernel), m1); VERIFY(m1image.fullPivLu().rank() == rank); VERIFY_IS_APPROX(m1 * m1.adjoint() * m1image, m1image); check_solverbase(m1, lu, rows, cols, cols2); m2 = CMatrixType::Random(cols,cols2); m3 = m1*m2; m2 = CMatrixType::Random(cols,cols2); // test that the code, which does resize(), may be applied to an xpr m2.block(0,0,m2.rows(),m2.cols()) = lu.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); } template void lu_invertible() { /* this test covers the following files: FullPivLU.h */ typedef typename NumTraits::Real RealScalar; Index size = MatrixType::RowsAtCompileTime; if( size==Dynamic) size = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixType m1(size, size), m2(size, size), m3(size, size); FullPivLU lu; lu.setThreshold(RealScalar(0.01)); do { m1 = MatrixType::Random(size,size); lu.compute(m1); } while(!lu.isInvertible()); VERIFY_IS_APPROX(m1, lu.reconstructedMatrix()); VERIFY(0 == lu.dimensionOfKernel()); VERIFY(lu.kernel().cols() == 1); // the kernel() should consist of a single (zero) column vector VERIFY(size == lu.rank()); VERIFY(lu.isInjective()); VERIFY(lu.isSurjective()); VERIFY(lu.isInvertible()); VERIFY(lu.image(m1).fullPivLu().isInvertible()); check_solverbase(m1, lu, size, size, size); MatrixType m1_inverse = lu.inverse(); m3 = MatrixType::Random(size,size); m2 = lu.solve(m3); 
VERIFY_IS_APPROX(m2, m1_inverse*m3); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); const RealScalar rcond_est = lu.rcond(); // Verify that the estimated condition number is within a factor of 10 of the // truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // Regression test for Bug 302 MatrixType m4 = MatrixType::Random(size,size); VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4); } template void lu_partial_piv(Index size = MatrixType::ColsAtCompileTime) { /* this test covers the following files: PartialPivLU.h */ typedef typename NumTraits::Real RealScalar; MatrixType m1(size, size), m2(size, size), m3(size, size); m1.setRandom(); PartialPivLU plu(m1); STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); VERIFY_IS_APPROX(m1, plu.reconstructedMatrix()); check_solverbase(m1, plu, size, size, size); MatrixType m1_inverse = plu.inverse(); m3 = MatrixType::Random(size,size); m2 = plu.solve(m3); VERIFY_IS_APPROX(m2, m1_inverse*m3); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); const RealScalar rcond_est = plu.rcond(); // Verify that the estimate is within a factor of 10 of the truth. 
VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); } template void lu_verify_assert() { MatrixType tmp; FullPivLU lu; VERIFY_RAISES_ASSERT(lu.matrixLU()) VERIFY_RAISES_ASSERT(lu.permutationP()) VERIFY_RAISES_ASSERT(lu.permutationQ()) VERIFY_RAISES_ASSERT(lu.kernel()) VERIFY_RAISES_ASSERT(lu.image(tmp)) VERIFY_RAISES_ASSERT(lu.solve(tmp)) VERIFY_RAISES_ASSERT(lu.transpose().solve(tmp)) VERIFY_RAISES_ASSERT(lu.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(lu.determinant()) VERIFY_RAISES_ASSERT(lu.rank()) VERIFY_RAISES_ASSERT(lu.dimensionOfKernel()) VERIFY_RAISES_ASSERT(lu.isInjective()) VERIFY_RAISES_ASSERT(lu.isSurjective()) VERIFY_RAISES_ASSERT(lu.isInvertible()) VERIFY_RAISES_ASSERT(lu.inverse()) PartialPivLU plu; VERIFY_RAISES_ASSERT(plu.matrixLU()) VERIFY_RAISES_ASSERT(plu.permutationP()) VERIFY_RAISES_ASSERT(plu.solve(tmp)) VERIFY_RAISES_ASSERT(plu.transpose().solve(tmp)) VERIFY_RAISES_ASSERT(plu.adjoint().solve(tmp)) VERIFY_RAISES_ASSERT(plu.determinant()) VERIFY_RAISES_ASSERT(plu.inverse()) } EIGEN_DECLARE_TEST(lu) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( lu_non_invertible() ); CALL_SUBTEST_1( lu_invertible() ); CALL_SUBTEST_1( lu_verify_assert() ); CALL_SUBTEST_1( lu_partial_piv() ); CALL_SUBTEST_2( (lu_non_invertible >()) ); CALL_SUBTEST_2( (lu_verify_assert >()) ); CALL_SUBTEST_2( lu_partial_piv() ); CALL_SUBTEST_2( lu_partial_piv() ); CALL_SUBTEST_2( (lu_partial_piv >()) ); CALL_SUBTEST_3( lu_non_invertible() ); CALL_SUBTEST_3( lu_invertible() ); CALL_SUBTEST_3( lu_verify_assert() ); CALL_SUBTEST_4( lu_non_invertible() ); CALL_SUBTEST_4( lu_invertible() ); CALL_SUBTEST_4( lu_partial_piv(internal::random(1,EIGEN_TEST_MAX_SIZE)) ); CALL_SUBTEST_4( lu_verify_assert() ); CALL_SUBTEST_5( lu_non_invertible() ); CALL_SUBTEST_5( lu_invertible() ); CALL_SUBTEST_5( lu_verify_assert() ); CALL_SUBTEST_6( lu_non_invertible() ); CALL_SUBTEST_6( lu_invertible() ); CALL_SUBTEST_6( lu_partial_piv(internal::random(1,EIGEN_TEST_MAX_SIZE)) ); CALL_SUBTEST_6( 
lu_verify_assert() ); CALL_SUBTEST_7(( lu_non_invertible >() )); // Test problem size constructors CALL_SUBTEST_9( PartialPivLU(10) ); CALL_SUBTEST_9( FullPivLU(10, 20); ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/main.h ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include #include #include #include #include #include #include #include #include #include // The following includes of STL headers have to be done _before_ the // definition of macros min() and max(). The reason is that many STL // implementations will not work properly as the min and max symbols collide // with the STL functions std:min() and std::max(). The STL headers may check // for the macro definition of min/max and issue a warning or undefine the // macros. // // Still, Windows defines min() and max() in windef.h as part of the regular // Windows system interfaces and many other Windows APIs depend on these // macros being available. To prevent the macro expansion of min/max and to // make Eigen compatible with the Windows environment all function calls of // std::min() and std::max() have to be written with parenthesis around the // function name. // // All STL headers used by Eigen should be included here. Because main.h is // included before any Eigen header and because the STL headers are guarded // against multiple inclusions, no STL header will see our own min/max macro // definitions. #include #include // Disable ICC's std::complex operator specializations so we can use our own. 
#define _OVERRIDE_COMPLEX_SPECIALIZATION_ 1 #include #include #include #include #include #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) #include #include #ifdef EIGEN_USE_THREADS #include #endif #endif // Configure GPU. #if defined(EIGEN_USE_HIP) #if defined(__HIPCC__) && !defined(EIGEN_NO_HIP) #define EIGEN_HIPCC __HIPCC__ #include #include #endif #elif defined(__CUDACC__) && !defined(EIGEN_NO_CUDA) #define EIGEN_CUDACC __CUDACC__ #include #include #include #if CUDA_VERSION >= 7050 #include #endif #endif #if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC) #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #endif // To test that all calls from Eigen code to std::min() and std::max() are // protected by parenthesis against macro expansion, the min()/max() macros // are defined here and any not-parenthesized min/max call will cause a // compiler error. #if !defined(__HIPCC__) && !defined(EIGEN_USE_SYCL) // // HIP header files include the following files // // // // which seem to contain not-parenthesized calls to "max"/"min", triggering the following check and causing the compile to fail // // Including those header files before the following macro definition for "min" / "max", only partially resolves the issue // This is because other HIP header files also define "isnan" / "isinf" / "isfinite" functions, which are needed in other // headers. 
// // So instead choosing to simply disable this check for HIP // #define min(A,B) please_protect_your_min_with_parentheses #define max(A,B) please_protect_your_max_with_parentheses #define isnan(X) please_protect_your_isnan_with_parentheses #define isinf(X) please_protect_your_isinf_with_parentheses #define isfinite(X) please_protect_your_isfinite_with_parentheses #endif // test possible conflicts struct real {}; struct imag {}; #ifdef M_PI #undef M_PI #endif #define M_PI please_use_EIGEN_PI_instead_of_M_PI #define FORBIDDEN_IDENTIFIER (this_identifier_is_forbidden_to_avoid_clashes) this_identifier_is_forbidden_to_avoid_clashes // B0 is defined in POSIX header termios.h #define B0 FORBIDDEN_IDENTIFIER // `I` may be defined by complex.h: #define I FORBIDDEN_IDENTIFIER // Unit tests calling Eigen's blas library must preserve the default blocking size // to avoid troubles. #ifndef EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS #define EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS #endif // shuts down ICC's remark #593: variable "XXX" was set but never used #define TEST_SET_BUT_UNUSED_VARIABLE(X) EIGEN_UNUSED_VARIABLE(X) #ifdef TEST_ENABLE_TEMPORARY_TRACKING static long int nb_temporaries; static long int nb_temporaries_on_assert = -1; inline void on_temporary_creation(long int size) { // here's a great place to set a breakpoint when debugging failures in this test! if(size!=0) nb_temporaries++; if(nb_temporaries_on_assert>0) assert(nb_temporaries if NDEBUG is not defined. 
#ifndef DEBUG #define DEBUG #endif // bounds integer values for AltiVec #if defined(__ALTIVEC__) || defined(__VSX__) #define EIGEN_MAKING_DOCS #endif #define DEFAULT_REPEAT 10 namespace Eigen { static std::vector g_test_stack; // level == 0 <=> abort if test fail // level >= 1 <=> warning message to std::cerr if test fail static int g_test_level = 0; static int g_repeat = 1; static unsigned int g_seed = 0; static bool g_has_set_repeat = false, g_has_set_seed = false; class EigenTest { public: EigenTest() : m_func(0) {} EigenTest(const char* a_name, void (*func)(void)) : m_name(a_name), m_func(func) { get_registered_tests().push_back(this); } const std::string& name() const { return m_name; } void operator()() const { m_func(); } static const std::vector& all() { return get_registered_tests(); } protected: static std::vector& get_registered_tests() { static std::vector* ms_registered_tests = new std::vector(); return *ms_registered_tests; } std::string m_name; void (*m_func)(void); }; // Declare and register a test, e.g.: // EIGEN_DECLARE_TEST(mytest) { ... } // will create a function: // void test_mytest() { ... } // that will be automatically called. #define EIGEN_DECLARE_TEST(X) \ void EIGEN_CAT(test_,X) (); \ static EigenTest EIGEN_CAT(test_handler_,X) (EIGEN_MAKESTRING(X), & EIGEN_CAT(test_,X)); \ void EIGEN_CAT(test_,X) () } #define TRACK std::cerr << __FILE__ << " " << __LINE__ << std::endl // #define TRACK while() #define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "") #if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__) #define EIGEN_EXCEPTIONS #endif #ifndef EIGEN_NO_ASSERTION_CHECKING namespace Eigen { static const bool should_raise_an_assert = false; // Used to avoid to raise two exceptions at a time in which // case the exception is not properly caught. // This may happen when a second exceptions is triggered in a destructor. 
static bool no_more_assert = false; static bool report_on_cerr_on_assert_failure = true; struct eigen_assert_exception { eigen_assert_exception(void) {} ~eigen_assert_exception() { Eigen::no_more_assert = false; } }; struct eigen_static_assert_exception { eigen_static_assert_exception(void) {} ~eigen_static_assert_exception() { Eigen::no_more_assert = false; } }; } // If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while // one should have been, then the list of executed assertions is printed out. // // EIGEN_DEBUG_ASSERTS is not enabled by default as it // significantly increases the compilation time // and might even introduce side effects that would hide // some memory errors. #ifdef EIGEN_DEBUG_ASSERTS namespace Eigen { namespace internal { static bool push_assert = false; } static std::vector eigen_assert_list; } #define eigen_assert(a) \ if( (!(a)) && (!no_more_assert) ) \ { \ if(report_on_cerr_on_assert_failure) \ std::cerr << #a << " " __FILE__ << "(" << __LINE__ << ")\n"; \ Eigen::no_more_assert = true; \ EIGEN_THROW_X(Eigen::eigen_assert_exception()); \ } \ else if (Eigen::internal::push_assert) \ { \ eigen_assert_list.push_back(std::string(EIGEN_MAKESTRING(__FILE__) " (" EIGEN_MAKESTRING(__LINE__) ") : " #a) ); \ } #ifdef EIGEN_EXCEPTIONS #define VERIFY_RAISES_ASSERT(a) \ { \ Eigen::no_more_assert = false; \ Eigen::eigen_assert_list.clear(); \ Eigen::internal::push_assert = true; \ Eigen::report_on_cerr_on_assert_failure = false; \ try { \ a; \ std::cerr << "One of the following asserts should have been triggered:\n"; \ for (uint ai=0 ; ai // required for createRandomPIMatrixOfRank and generateRandomMatrixSvs inline void verify_impl(bool condition, const char *testname, const char *file, int line, const char *condition_as_string) { if (!condition) { if(Eigen::g_test_level>0) std::cerr << "WARNING: "; std::cerr << "Test " << testname << " failed in " << file << " (" << line << ")" << std::endl << " " << condition_as_string << std::endl; 
std::cerr << "Stack:\n"; const int test_stack_size = static_cast(Eigen::g_test_stack.size()); for(int i=test_stack_size-1; i>=0; --i) std::cerr << " - " << Eigen::g_test_stack[i] << "\n"; std::cerr << "\n"; if(Eigen::g_test_level==0) abort(); } } #define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a)) #define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a >= b)) #define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EIGEN_MAKESTRING(a <= b)) #define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b, true)) #define VERIFY_IS_NOT_EQUAL(a, b) VERIFY(test_is_equal(a, b, false)) #define VERIFY_IS_APPROX(a, b) VERIFY(verifyIsApprox(a, b)) #define VERIFY_IS_NOT_APPROX(a, b) VERIFY(!test_isApprox(a, b)) #define VERIFY_IS_MUCH_SMALLER_THAN(a, b) VERIFY(test_isMuchSmallerThan(a, b)) #define VERIFY_IS_NOT_MUCH_SMALLER_THAN(a, b) VERIFY(!test_isMuchSmallerThan(a, b)) #define VERIFY_IS_APPROX_OR_LESS_THAN(a, b) VERIFY(test_isApproxOrLessThan(a, b)) #define VERIFY_IS_NOT_APPROX_OR_LESS_THAN(a, b) VERIFY(!test_isApproxOrLessThan(a, b)) #define VERIFY_IS_CWISE_EQUAL(a, b) VERIFY(verifyIsCwiseApprox(a, b, true)) #define VERIFY_IS_CWISE_APPROX(a, b) VERIFY(verifyIsCwiseApprox(a, b, false)) #define VERIFY_IS_UNITARY(a) VERIFY(test_isUnitary(a)) #define STATIC_CHECK(COND) EIGEN_STATIC_ASSERT( (COND) , EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT ) #define CALL_SUBTEST(FUNC) do { \ g_test_stack.push_back(EIGEN_MAKESTRING(FUNC)); \ FUNC; \ g_test_stack.pop_back(); \ } while (0) // Forward declarations to avoid ICC warnings #if EIGEN_COMP_ICC template std::string type_name(); namespace Eigen { template bool test_is_equal(const T& actual, const U& expected, bool expect_equal=true); } // end namespace Eigen #endif // EIGEN_COMP_ICC namespace Eigen { template typename internal::enable_if::value,bool>::type is_same_type(const T1&, const T2&) { 
return true; } template inline typename NumTraits::Real test_precision() { return NumTraits::dummy_precision(); } template<> inline float test_precision() { return 1e-3f; } template<> inline double test_precision() { return 1e-6; } template<> inline long double test_precision() { return 1e-6l; } template<> inline float test_precision >() { return test_precision(); } template<> inline double test_precision >() { return test_precision(); } template<> inline long double test_precision >() { return test_precision(); } #define EIGEN_TEST_SCALAR_TEST_OVERLOAD(TYPE) \ inline bool test_isApprox(TYPE a, TYPE b) \ { return internal::isApprox(a, b, test_precision()); } \ inline bool test_isCwiseApprox(TYPE a, TYPE b, bool exact) \ { return a == b || ((numext::isnan)(a) && (numext::isnan)(b)) || \ (!exact && internal::isApprox(a, b, test_precision())); } \ inline bool test_isMuchSmallerThan(TYPE a, TYPE b) \ { return internal::isMuchSmallerThan(a, b, test_precision()); } \ inline bool test_isApproxOrLessThan(TYPE a, TYPE b) \ { return internal::isApproxOrLessThan(a, b, test_precision()); } EIGEN_TEST_SCALAR_TEST_OVERLOAD(short) EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned short) EIGEN_TEST_SCALAR_TEST_OVERLOAD(int) EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned int) EIGEN_TEST_SCALAR_TEST_OVERLOAD(long) EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long) #if EIGEN_HAS_CXX11 EIGEN_TEST_SCALAR_TEST_OVERLOAD(long long) EIGEN_TEST_SCALAR_TEST_OVERLOAD(unsigned long long) #endif EIGEN_TEST_SCALAR_TEST_OVERLOAD(float) EIGEN_TEST_SCALAR_TEST_OVERLOAD(double) EIGEN_TEST_SCALAR_TEST_OVERLOAD(half) EIGEN_TEST_SCALAR_TEST_OVERLOAD(bfloat16) #undef EIGEN_TEST_SCALAR_TEST_OVERLOAD #ifndef EIGEN_TEST_NO_COMPLEX inline bool test_isApprox(const std::complex& a, const std::complex& b) { return internal::isApprox(a, b, test_precision >()); } inline bool test_isMuchSmallerThan(const std::complex& a, const std::complex& b) { return internal::isMuchSmallerThan(a, b, test_precision >()); } inline bool 
test_isApprox(const std::complex& a, const std::complex& b) { return internal::isApprox(a, b, test_precision >()); } inline bool test_isMuchSmallerThan(const std::complex& a, const std::complex& b) { return internal::isMuchSmallerThan(a, b, test_precision >()); } #ifndef EIGEN_TEST_NO_LONGDOUBLE inline bool test_isApprox(const std::complex& a, const std::complex& b) { return internal::isApprox(a, b, test_precision >()); } inline bool test_isMuchSmallerThan(const std::complex& a, const std::complex& b) { return internal::isMuchSmallerThan(a, b, test_precision >()); } #endif #endif #ifndef EIGEN_TEST_NO_LONGDOUBLE inline bool test_isApprox(const long double& a, const long double& b) { bool ret = internal::isApprox(a, b, test_precision()); if (!ret) std::cerr << std::endl << " actual = " << a << std::endl << " expected = " << b << std::endl << std::endl; return ret; } inline bool test_isMuchSmallerThan(const long double& a, const long double& b) { return internal::isMuchSmallerThan(a, b, test_precision()); } inline bool test_isApproxOrLessThan(const long double& a, const long double& b) { return internal::isApproxOrLessThan(a, b, test_precision()); } #endif // EIGEN_TEST_NO_LONGDOUBLE // test_relative_error returns the relative difference between a and b as a real scalar as used in isApprox. 
template typename NumTraits::NonInteger test_relative_error(const EigenBase &a, const EigenBase &b) { using std::sqrt; typedef typename NumTraits::NonInteger RealScalar; typename internal::nested_eval::type ea(a.derived()); typename internal::nested_eval::type eb(b.derived()); return sqrt(RealScalar((ea-eb).cwiseAbs2().sum()) / RealScalar((std::min)(eb.cwiseAbs2().sum(),ea.cwiseAbs2().sum()))); } template typename T1::RealScalar test_relative_error(const T1 &a, const T2 &b, const typename T1::Coefficients* = 0) { return test_relative_error(a.coeffs(), b.coeffs()); } template typename T1::Scalar test_relative_error(const T1 &a, const T2 &b, const typename T1::MatrixType* = 0) { return test_relative_error(a.matrix(), b.matrix()); } template S test_relative_error(const Translation &a, const Translation &b) { return test_relative_error(a.vector(), b.vector()); } template S test_relative_error(const ParametrizedLine &a, const ParametrizedLine &b) { return (std::max)(test_relative_error(a.origin(), b.origin()), test_relative_error(a.origin(), b.origin())); } template S test_relative_error(const AlignedBox &a, const AlignedBox &b) { return (std::max)(test_relative_error((a.min)(), (b.min)()), test_relative_error((a.max)(), (b.max)())); } template class SparseMatrixBase; template typename T1::RealScalar test_relative_error(const MatrixBase &a, const SparseMatrixBase &b) { return test_relative_error(a,b.toDense()); } template class SparseMatrixBase; template typename T1::RealScalar test_relative_error(const SparseMatrixBase &a, const MatrixBase &b) { return test_relative_error(a.toDense(),b); } template class SparseMatrixBase; template typename T1::RealScalar test_relative_error(const SparseMatrixBase &a, const SparseMatrixBase &b) { return test_relative_error(a.toDense(),b.toDense()); } template typename NumTraits::Real>::NonInteger test_relative_error(const T1 &a, const T2 &b, typename internal::enable_if::Real>::value, T1>::type* = 0) { typedef typename 
NumTraits::Real>::NonInteger RealScalar; return numext::sqrt(RealScalar(numext::abs2(a-b))/(numext::mini)(RealScalar(numext::abs2(a)),RealScalar(numext::abs2(b)))); } template T test_relative_error(const Rotation2D &a, const Rotation2D &b) { return test_relative_error(a.angle(), b.angle()); } template T test_relative_error(const AngleAxis &a, const AngleAxis &b) { return (std::max)(test_relative_error(a.angle(), b.angle()), test_relative_error(a.axis(), b.axis())); } template inline bool test_isApprox(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only { return a.isApprox(b, test_precision()); } // get_test_precision is a small wrapper to test_precision allowing to return the scalar precision for either scalars or expressions template typename NumTraits::Real get_test_precision(const T&, const typename T::Scalar* = 0) { return test_precision::Real>(); } template typename NumTraits::Real get_test_precision(const T&,typename internal::enable_if::Real>::value, T>::type* = 0) { return test_precision::Real>(); } // verifyIsApprox is a wrapper to test_isApprox that outputs the relative difference magnitude if the test fails. template inline bool verifyIsApprox(const Type1& a, const Type2& b) { bool ret = test_isApprox(a,b); if(!ret) { std::cerr << "Difference too large wrt tolerance " << get_test_precision(a) << ", relative error is: " << test_relative_error(a,b) << std::endl; } return ret; } // verifyIsCwiseApprox is a wrapper to test_isCwiseApprox that outputs the relative difference magnitude if the test fails. 
template inline bool verifyIsCwiseApprox(const Type1& a, const Type2& b, bool exact) { bool ret = test_isCwiseApprox(a,b,exact); if(!ret) { if (exact) { std::cerr << "Values are not an exact match"; } else { std::cerr << "Difference too large wrt tolerance " << get_test_precision(a); } std::cerr << ", relative error is: " << test_relative_error(a,b) << std::endl; } return ret; } // The idea behind this function is to compare the two scalars a and b where // the scalar ref is a hint about the expected order of magnitude of a and b. // WARNING: the scalar a and b must be positive // Therefore, if for some reason a and b are very small compared to ref, // we won't issue a false negative. // This test could be: abs(a-b) <= eps * ref // However, it seems that simply comparing a+ref and b+ref is more sensitive to true error. template inline bool test_isApproxWithRef(const Scalar& a, const Scalar& b, const ScalarRef& ref) { return test_isApprox(a+ref, b+ref); } template inline bool test_isMuchSmallerThan(const MatrixBase& m1, const MatrixBase& m2) { return m1.isMuchSmallerThan(m2, test_precision::Scalar>()); } template inline bool test_isMuchSmallerThan(const MatrixBase& m, const typename NumTraits::Scalar>::Real& s) { return m.isMuchSmallerThan(s, test_precision::Scalar>()); } template inline bool test_isUnitary(const MatrixBase& m) { return m.isUnitary(test_precision::Scalar>()); } // Checks component-wise, works with infs and nans. 
template bool test_isCwiseApprox(const DenseBase& m1, const DenseBase& m2, bool exact) { if (m1.rows() != m2.rows()) { return false; } if (m1.cols() != m2.cols()) { return false; } for (Index r = 0; r < m1.rows(); ++r) { for (Index c = 0; c < m1.cols(); ++c) { if (m1(r, c) != m2(r, c) && !((numext::isnan)(m1(r, c)) && (numext::isnan)(m2(r, c))) && (exact || !test_isApprox(m1(r, c), m2(r, c)))) { return false; } } } return true; } template bool test_is_equal(const T& actual, const U& expected, bool expect_equal) { if ((actual==expected) == expect_equal) return true; // false: std::cerr << "\n actual = " << actual << "\n expected " << (expect_equal ? "= " : "!=") << expected << "\n\n"; return false; } /** * Check if number is "not a number" (NaN). * * @tparam T input type * @param x input value * @return true, if input value is "not a number" (NaN) */ template bool isNotNaN(const T& x) { return x==x; } /** * Check if number is plus infinity. * * @tparam T input type * @param x input value * @return true, if input value is plus infinity */ template bool isPlusInf(const T& x) { return x > NumTraits::highest(); } /** * Check if number is minus infinity. 
* * @tparam T input type * @param x input value * @return true, if input value is minus infinity */ template bool isMinusInf(const T& x) { return x < NumTraits::lowest(); } } // end namespace Eigen #include "random_matrix_helper.h" template struct GetDifferentType; template<> struct GetDifferentType { typedef double type; }; template<> struct GetDifferentType { typedef float type; }; template struct GetDifferentType > { typedef std::complex::type> type; }; template std::string type_name() { return "other"; } template<> std::string type_name() { return "float"; } template<> std::string type_name() { return "double"; } template<> std::string type_name() { return "long double"; } template<> std::string type_name() { return "int"; } template<> std::string type_name >() { return "complex"; } template<> std::string type_name >() { return "complex"; } template<> std::string type_name >() { return "complex"; } template<> std::string type_name >() { return "complex"; } using namespace Eigen; /** * Set number of repetitions for unit test from input string. * * @param str input string */ inline void set_repeat_from_string(const char *str) { errno = 0; g_repeat = int(strtoul(str, 0, 10)); if(errno || g_repeat <= 0) { std::cout << "Invalid repeat value " << str << std::endl; exit(EXIT_FAILURE); } g_has_set_repeat = true; } /** * Set seed for randomized unit tests from input string. 
* * @param str input string */ inline void set_seed_from_string(const char *str) { errno = 0; g_seed = int(strtoul(str, 0, 10)); if(errno || g_seed == 0) { std::cout << "Invalid seed value " << str << std::endl; exit(EXIT_FAILURE); } g_has_set_seed = true; } int main(int argc, char *argv[]) { g_has_set_repeat = false; g_has_set_seed = false; bool need_help = false; for(int i = 1; i < argc; i++) { if(argv[i][0] == 'r') { if(g_has_set_repeat) { std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl; return 1; } set_repeat_from_string(argv[i]+1); } else if(argv[i][0] == 's') { if(g_has_set_seed) { std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl; return 1; } set_seed_from_string(argv[i]+1); } else { need_help = true; } } if(need_help) { std::cout << "This test application takes the following optional arguments:" << std::endl; std::cout << " rN Repeat each test N times (default: " << DEFAULT_REPEAT << ")" << std::endl; std::cout << " sN Use N as seed for random numbers (default: based on current time)" << std::endl; std::cout << std::endl; std::cout << "If defined, the environment variables EIGEN_REPEAT and EIGEN_SEED" << std::endl; std::cout << "will be used as default values for these parameters." 
<< std::endl; return 1; } char *env_EIGEN_REPEAT = getenv("EIGEN_REPEAT"); if(!g_has_set_repeat && env_EIGEN_REPEAT) set_repeat_from_string(env_EIGEN_REPEAT); char *env_EIGEN_SEED = getenv("EIGEN_SEED"); if(!g_has_set_seed && env_EIGEN_SEED) set_seed_from_string(env_EIGEN_SEED); if(!g_has_set_seed) g_seed = (unsigned int) time(NULL); if(!g_has_set_repeat) g_repeat = DEFAULT_REPEAT; std::cout << "Initializing random number generator with seed " << g_seed << std::endl; std::stringstream ss; ss << "Seed: " << g_seed; g_test_stack.push_back(ss.str()); srand(g_seed); std::cout << "Repeating each test " << g_repeat << " times" << std::endl; VERIFY(EigenTest::all().size()>0); for(std::size_t i=0; i this warning is raised even for legal usage as: g_test_stack.push_back("foo"); where g_test_stack is a std::vector // remark #1418: external function definition with no prior declaration // -> this warning is raised for all our test functions. Declaring them static would fix the issue. // warning #279: controlling expression is constant // remark #1572: floating-point equality and inequality comparisons are unreliable #pragma warning disable 279 383 1418 1572 #endif #ifdef _MSC_VER // 4503 - decorated name length exceeded, name was truncated #pragma warning( disable : 4503) #endif #include "gpu_test_helper.h" ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/mapped_matrix.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" #define EIGEN_TESTMAP_MAX_SIZE 256 template void map_class_vector(const VectorType& m) { typedef typename VectorType::Scalar Scalar; Index size = m.size(); Scalar* array1 = internal::aligned_new(size); Scalar* array2 = internal::aligned_new(size); Scalar* array3 = new Scalar[size+1]; Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? array3+1 : array3; Scalar array4[EIGEN_TESTMAP_MAX_SIZE]; Map(array1, size) = VectorType::Random(size); Map(array2, size) = Map(array1, size); Map(array3unaligned, size) = Map(array1, size); Map(array4, size) = Map(array1, size); VectorType ma1 = Map(array1, size); VectorType ma2 = Map(array2, size); VectorType ma3 = Map(array3unaligned, size); VectorType ma4 = Map(array4, size); VERIFY_IS_EQUAL(ma1, ma2); VERIFY_IS_EQUAL(ma1, ma3); VERIFY_IS_EQUAL(ma1, ma4); #ifdef EIGEN_VECTORIZE if(internal::packet_traits::Vectorizable && size>=AlignedMax) VERIFY_RAISES_ASSERT((Map(array3unaligned, size))) #endif internal::aligned_delete(array1, size); internal::aligned_delete(array2, size); delete[] array3; } template void map_class_matrix(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(), cols = m.cols(), size = rows*cols; Scalar s1 = internal::random(); // array1 and array2 -> aligned heap allocation Scalar* array1 = internal::aligned_new(size); for(int i = 0; i < size; i++) array1[i] = Scalar(1); Scalar* array2 = internal::aligned_new(size); for(int i = 0; i < size; i++) array2[i] = Scalar(1); // array3unaligned -> unaligned pointer to heap Scalar* array3 = new Scalar[size+1]; Index sizep1 = size + 1; // <- without this temporary MSVC 2103 generates bad code for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1); Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? 
array3+1 : array3; Scalar array4[256]; if(size<=256) for(int i = 0; i < size; i++) array4[i] = Scalar(1); Map map1(array1, rows, cols); Map map2(array2, rows, cols); Map map3(array3unaligned, rows, cols); Map map4(array4, rows, cols); VERIFY_IS_EQUAL(map1, MatrixType::Ones(rows,cols)); VERIFY_IS_EQUAL(map2, MatrixType::Ones(rows,cols)); VERIFY_IS_EQUAL(map3, MatrixType::Ones(rows,cols)); map1 = MatrixType::Random(rows,cols); map2 = map1; map3 = map1; MatrixType ma1 = map1; MatrixType ma2 = map2; MatrixType ma3 = map3; VERIFY_IS_EQUAL(map1, map2); VERIFY_IS_EQUAL(map1, map3); VERIFY_IS_EQUAL(ma1, ma2); VERIFY_IS_EQUAL(ma1, ma3); VERIFY_IS_EQUAL(ma1, map3); VERIFY_IS_APPROX(s1*map1, s1*map2); VERIFY_IS_APPROX(s1*ma1, s1*ma2); VERIFY_IS_EQUAL(s1*ma1, s1*ma3); VERIFY_IS_APPROX(s1*map1, s1*map3); map2 *= s1; map3 *= s1; VERIFY_IS_APPROX(s1*map1, map2); VERIFY_IS_APPROX(s1*map1, map3); if(size<=256) { VERIFY_IS_EQUAL(map4, MatrixType::Ones(rows,cols)); map4 = map1; MatrixType ma4 = map4; VERIFY_IS_EQUAL(map1, map4); VERIFY_IS_EQUAL(ma1, map4); VERIFY_IS_EQUAL(ma1, ma4); VERIFY_IS_APPROX(s1*map1, s1*map4); map4 *= s1; VERIFY_IS_APPROX(s1*map1, map4); } internal::aligned_delete(array1, size); internal::aligned_delete(array2, size); delete[] array3; } template void map_static_methods(const VectorType& m) { typedef typename VectorType::Scalar Scalar; Index size = m.size(); Scalar* array1 = internal::aligned_new(size); Scalar* array2 = internal::aligned_new(size); Scalar* array3 = new Scalar[size+1]; Scalar* array3unaligned = internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES == 0 ? 
array3+1 : array3; VectorType::MapAligned(array1, size) = VectorType::Random(size); VectorType::Map(array2, size) = VectorType::Map(array1, size); VectorType::Map(array3unaligned, size) = VectorType::Map(array1, size); VectorType ma1 = VectorType::Map(array1, size); VectorType ma2 = VectorType::MapAligned(array2, size); VectorType ma3 = VectorType::Map(array3unaligned, size); VERIFY_IS_EQUAL(ma1, ma2); VERIFY_IS_EQUAL(ma1, ma3); internal::aligned_delete(array1, size); internal::aligned_delete(array2, size); delete[] array3; } template void check_const_correctness(const PlainObjectType&) { // there's a lot that we can't test here while still having this test compile! // the only possible approach would be to run a script trying to compile stuff and checking that it fails. // CMake can help with that. // verify that map-to-const don't have LvalueBit typedef typename internal::add_const::type ConstPlainObjectType; VERIFY( !(internal::traits >::Flags & LvalueBit) ); VERIFY( !(internal::traits >::Flags & LvalueBit) ); VERIFY( !(Map::Flags & LvalueBit) ); VERIFY( !(Map::Flags & LvalueBit) ); } template void map_not_aligned_on_scalar() { typedef Matrix MatrixType; Index size = 11; Scalar* array1 = internal::aligned_new((size+1)*(size+1)+1); Scalar* array2 = reinterpret_cast(sizeof(Scalar)/2+std::size_t(array1)); Map > map2(array2, size, size, OuterStride<>(size+1)); MatrixType m2 = MatrixType::Random(size,size); map2 = m2; VERIFY_IS_EQUAL(m2, map2); typedef Matrix VectorType; Map map3(array2, size); MatrixType v3 = VectorType::Random(size); map3 = v3; VERIFY_IS_EQUAL(v3, map3); internal::aligned_delete(array1, (size+1)*(size+1)+1); } EIGEN_DECLARE_TEST(mapped_matrix) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( map_class_vector(Matrix()) ); CALL_SUBTEST_1( check_const_correctness(Matrix()) ); CALL_SUBTEST_2( map_class_vector(Vector4d()) ); CALL_SUBTEST_2( map_class_vector(VectorXd(13)) ); CALL_SUBTEST_2( check_const_correctness(Matrix4d()) ); CALL_SUBTEST_3( 
map_class_vector(RowVector4f()) ); CALL_SUBTEST_4( map_class_vector(VectorXcf(8)) ); CALL_SUBTEST_5( map_class_vector(VectorXi(12)) ); CALL_SUBTEST_5( check_const_correctness(VectorXi(12)) ); CALL_SUBTEST_1( map_class_matrix(Matrix()) ); CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); CALL_SUBTEST_11( map_class_matrix(Matrix()) ); CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,10),internal::random(1,10))) ); CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,10),internal::random(1,10))) ); CALL_SUBTEST_6( map_static_methods(Matrix()) ); CALL_SUBTEST_7( map_static_methods(Vector3f()) ); CALL_SUBTEST_8( map_static_methods(RowVector3d()) ); CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) ); CALL_SUBTEST_10( map_static_methods(VectorXf(12)) ); CALL_SUBTEST_11( map_not_aligned_on_scalar() ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/mapstaticmethods.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" // GCC<=4.8 has spurious shadow warnings, because `ptr` re-appears inside template instantiations // workaround: put these in an anonymous namespace namespace { float *ptr; const float *const_ptr; } template struct mapstaticmethods_impl {}; template struct mapstaticmethods_impl { static void run(const PlainObjectType& m) { mapstaticmethods_impl::run(m); int i = internal::random(2,5), j = internal::random(2,5); PlainObjectType::Map(ptr).setZero(); PlainObjectType::MapAligned(ptr).setZero(); PlainObjectType::Map(const_ptr).sum(); PlainObjectType::MapAligned(const_ptr).sum(); PlainObjectType::Map(ptr, InnerStride<>(i)).setZero(); PlainObjectType::MapAligned(ptr, InnerStride<>(i)).setZero(); PlainObjectType::Map(const_ptr, InnerStride<>(i)).sum(); PlainObjectType::MapAligned(const_ptr, InnerStride<>(i)).sum(); PlainObjectType::Map(ptr, InnerStride<2>()).setZero(); PlainObjectType::MapAligned(ptr, InnerStride<3>()).setZero(); PlainObjectType::Map(const_ptr, InnerStride<4>()).sum(); PlainObjectType::MapAligned(const_ptr, InnerStride<5>()).sum(); PlainObjectType::Map(ptr, OuterStride<>(i)).setZero(); PlainObjectType::MapAligned(ptr, OuterStride<>(i)).setZero(); PlainObjectType::Map(const_ptr, OuterStride<>(i)).sum(); PlainObjectType::MapAligned(const_ptr, OuterStride<>(i)).sum(); PlainObjectType::Map(ptr, OuterStride<2>()).setZero(); PlainObjectType::MapAligned(ptr, OuterStride<3>()).setZero(); PlainObjectType::Map(const_ptr, OuterStride<4>()).sum(); PlainObjectType::MapAligned(const_ptr, OuterStride<5>()).sum(); PlainObjectType::Map(ptr, Stride(i,j)).setZero(); PlainObjectType::MapAligned(ptr, Stride<2,Dynamic>(2,i)).setZero(); PlainObjectType::Map(const_ptr, Stride(i,3)).sum(); PlainObjectType::MapAligned(const_ptr, Stride(i,j)).sum(); PlainObjectType::Map(ptr, Stride<2,3>()).setZero(); PlainObjectType::MapAligned(ptr, Stride<3,4>()).setZero(); PlainObjectType::Map(const_ptr, Stride<2,4>()).sum(); PlainObjectType::MapAligned(const_ptr, 
Stride<5,3>()).sum(); } }; template struct mapstaticmethods_impl { static void run(const PlainObjectType& m) { Index rows = m.rows(), cols = m.cols(); int i = internal::random(2,5), j = internal::random(2,5); PlainObjectType::Map(ptr, rows, cols).setZero(); PlainObjectType::MapAligned(ptr, rows, cols).setZero(); PlainObjectType::Map(const_ptr, rows, cols).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols).sum(); PlainObjectType::Map(ptr, rows, cols, InnerStride<>(i)).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, InnerStride<>(i)).setZero(); PlainObjectType::Map(const_ptr, rows, cols, InnerStride<>(i)).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, InnerStride<>(i)).sum(); PlainObjectType::Map(ptr, rows, cols, InnerStride<2>()).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, InnerStride<3>()).setZero(); PlainObjectType::Map(const_ptr, rows, cols, InnerStride<4>()).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, InnerStride<5>()).sum(); PlainObjectType::Map(ptr, rows, cols, OuterStride<>(i)).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, OuterStride<>(i)).setZero(); PlainObjectType::Map(const_ptr, rows, cols, OuterStride<>(i)).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, OuterStride<>(i)).sum(); PlainObjectType::Map(ptr, rows, cols, OuterStride<2>()).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, OuterStride<3>()).setZero(); PlainObjectType::Map(const_ptr, rows, cols, OuterStride<4>()).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, OuterStride<5>()).sum(); PlainObjectType::Map(ptr, rows, cols, Stride(i,j)).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, Stride<2,Dynamic>(2,i)).setZero(); PlainObjectType::Map(const_ptr, rows, cols, Stride(i,3)).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, Stride(i,j)).sum(); PlainObjectType::Map(ptr, rows, cols, Stride<2,3>()).setZero(); PlainObjectType::MapAligned(ptr, rows, cols, Stride<3,4>()).setZero(); 
PlainObjectType::Map(const_ptr, rows, cols, Stride<2,4>()).sum(); PlainObjectType::MapAligned(const_ptr, rows, cols, Stride<5,3>()).sum(); } }; template struct mapstaticmethods_impl { static void run(const PlainObjectType& v) { Index size = v.size(); int i = internal::random(2,5); PlainObjectType::Map(ptr, size).setZero(); PlainObjectType::MapAligned(ptr, size).setZero(); PlainObjectType::Map(const_ptr, size).sum(); PlainObjectType::MapAligned(const_ptr, size).sum(); PlainObjectType::Map(ptr, size, InnerStride<>(i)).setZero(); PlainObjectType::MapAligned(ptr, size, InnerStride<>(i)).setZero(); PlainObjectType::Map(const_ptr, size, InnerStride<>(i)).sum(); PlainObjectType::MapAligned(const_ptr, size, InnerStride<>(i)).sum(); PlainObjectType::Map(ptr, size, InnerStride<2>()).setZero(); PlainObjectType::MapAligned(ptr, size, InnerStride<3>()).setZero(); PlainObjectType::Map(const_ptr, size, InnerStride<4>()).sum(); PlainObjectType::MapAligned(const_ptr, size, InnerStride<5>()).sum(); } }; template void mapstaticmethods(const PlainObjectType& m) { mapstaticmethods_impl::run(m); VERIFY(true); // just to avoid 'unused function' warning } EIGEN_DECLARE_TEST(mapstaticmethods) { ptr = internal::aligned_new(1000); for(int i = 0; i < 1000; i++) ptr[i] = float(i); const_ptr = ptr; CALL_SUBTEST_1(( mapstaticmethods(Matrix()) )); CALL_SUBTEST_1(( mapstaticmethods(Vector2f()) )); CALL_SUBTEST_2(( mapstaticmethods(Vector3f()) )); CALL_SUBTEST_2(( mapstaticmethods(Matrix2f()) )); CALL_SUBTEST_3(( mapstaticmethods(Matrix4f()) )); CALL_SUBTEST_3(( mapstaticmethods(Array4f()) )); CALL_SUBTEST_4(( mapstaticmethods(Array3f()) )); CALL_SUBTEST_4(( mapstaticmethods(Array33f()) )); CALL_SUBTEST_5(( mapstaticmethods(Array44f()) )); CALL_SUBTEST_5(( mapstaticmethods(VectorXf(1)) )); CALL_SUBTEST_5(( mapstaticmethods(VectorXf(8)) )); CALL_SUBTEST_6(( mapstaticmethods(MatrixXf(1,1)) )); CALL_SUBTEST_6(( mapstaticmethods(MatrixXf(5,7)) )); CALL_SUBTEST_7(( mapstaticmethods(ArrayXf(1)) )); 
CALL_SUBTEST_7(( mapstaticmethods(ArrayXf(5)) )); CALL_SUBTEST_8(( mapstaticmethods(ArrayXXf(1,1)) )); CALL_SUBTEST_8(( mapstaticmethods(ArrayXXf(8,6)) )); internal::aligned_delete(ptr, 1000); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/mapstride.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template void map_class_vector(const VectorType& m) { typedef typename VectorType::Scalar Scalar; Index size = m.size(); VectorType v = VectorType::Random(size); Index arraysize = 3*size; Scalar* a_array = internal::aligned_new(arraysize+1); Scalar* array = a_array; if(Alignment!=Aligned) array = (Scalar*)(internal::IntPtr(a_array) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); { Map > map(array, size); map = v; for(int i = 0; i < size; ++i) { VERIFY(array[3*i] == v[i]); VERIFY(map[i] == v[i]); } } { Map > map(array, size, InnerStride(2)); map = v; for(int i = 0; i < size; ++i) { VERIFY(array[2*i] == v[i]); VERIFY(map[i] == v[i]); } } internal::aligned_delete(a_array, arraysize+1); } template void map_class_matrix(const MatrixType& _m) { typedef typename MatrixType::Scalar Scalar; Index rows = _m.rows(), cols = _m.cols(); MatrixType m = MatrixType::Random(rows,cols); Scalar s1 = internal::random(); Index arraysize = 4*(rows+4)*(cols+4); Scalar* a_array1 = internal::aligned_new(arraysize+1); Scalar* array1 = a_array1; if(Alignment!=Aligned) array1 = (Scalar*)(internal::IntPtr(a_array1) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); Scalar a_array2[256]; Scalar* array2 = 
a_array2; if(Alignment!=Aligned) array2 = (Scalar*)(internal::IntPtr(a_array2) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); else array2 = (Scalar*)(((internal::UIntPtr(a_array2)+EIGEN_MAX_ALIGN_BYTES-1)/EIGEN_MAX_ALIGN_BYTES)*EIGEN_MAX_ALIGN_BYTES); Index maxsize2 = a_array2 - array2 + 256; // test no inner stride and some dynamic outer stride for(int k=0; k<2; ++k) { if(k==1 && (m.innerSize()+1)*m.outerSize() > maxsize2) break; Scalar* array = (k==0 ? array1 : array2); Map > map(array, rows, cols, OuterStride(m.innerSize()+1)); map = m; VERIFY(map.outerStride() == map.innerSize()+1); for(int i = 0; i < m.outerSize(); ++i) for(int j = 0; j < m.innerSize(); ++j) { VERIFY(array[map.outerStride()*i+j] == m.coeffByOuterInner(i,j)); VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); } VERIFY_IS_APPROX(s1*map,s1*m); map *= s1; VERIFY_IS_APPROX(map,s1*m); } // test no inner stride and an outer stride of +4. This is quite important as for fixed-size matrices, // this allows to hit the special case where it's vectorizable. for(int k=0; k<2; ++k) { if(k==1 && (m.innerSize()+4)*m.outerSize() > maxsize2) break; Scalar* array = (k==0 ? array1 : array2); enum { InnerSize = MatrixType::InnerSizeAtCompileTime, OuterStrideAtCompileTime = InnerSize==Dynamic ? Dynamic : InnerSize+4 }; Map > map(array, rows, cols, OuterStride(m.innerSize()+4)); map = m; VERIFY(map.outerStride() == map.innerSize()+4); for(int i = 0; i < m.outerSize(); ++i) for(int j = 0; j < m.innerSize(); ++j) { VERIFY(array[map.outerStride()*i+j] == m.coeffByOuterInner(i,j)); VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); } VERIFY_IS_APPROX(s1*map,s1*m); map *= s1; VERIFY_IS_APPROX(map,s1*m); } // test both inner stride and outer stride for(int k=0; k<2; ++k) { if(k==1 && (2*m.innerSize()+1)*(m.outerSize()*2) > maxsize2) break; Scalar* array = (k==0 ? 
array1 : array2); Map > map(array, rows, cols, Stride(2*m.innerSize()+1, 2)); map = m; VERIFY(map.outerStride() == 2*map.innerSize()+1); VERIFY(map.innerStride() == 2); for(int i = 0; i < m.outerSize(); ++i) for(int j = 0; j < m.innerSize(); ++j) { VERIFY(array[map.outerStride()*i+map.innerStride()*j] == m.coeffByOuterInner(i,j)); VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); } VERIFY_IS_APPROX(s1*map,s1*m); map *= s1; VERIFY_IS_APPROX(map,s1*m); } // test inner stride and no outer stride for(int k=0; k<2; ++k) { if(k==1 && (m.innerSize()*2)*m.outerSize() > maxsize2) break; Scalar* array = (k==0 ? array1 : array2); Map > map(array, rows, cols, InnerStride(2)); map = m; VERIFY(map.outerStride() == map.innerSize()*2); for(int i = 0; i < m.outerSize(); ++i) for(int j = 0; j < m.innerSize(); ++j) { VERIFY(array[map.innerSize()*i*2+j*2] == m.coeffByOuterInner(i,j)); VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); } VERIFY_IS_APPROX(s1*map,s1*m); map *= s1; VERIFY_IS_APPROX(map,s1*m); } // test negative strides { Matrix::Map(a_array1, arraysize+1).setRandom(); Index outerstride = m.innerSize()+4; Scalar* array = array1; { Map > map1(array, rows, cols, OuterStride<>( outerstride)); Map > map2(array+(m.outerSize()-1)*outerstride, rows, cols, OuterStride<>(-outerstride)); if(MatrixType::IsRowMajor) VERIFY_IS_APPROX(map1.colwise().reverse(), map2); else VERIFY_IS_APPROX(map1.rowwise().reverse(), map2); } { Map > map1(array, rows, cols, OuterStride<>( outerstride)); Map > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride(-outerstride,-1)); VERIFY_IS_APPROX(map1.reverse(), map2); } { Map > map1(array, rows, cols, OuterStride<>( outerstride)); Map > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride(-outerstride,-1)); VERIFY_IS_APPROX(map1.reverse(), map2); } } internal::aligned_delete(a_array1, arraysize+1); } // Additional tests for inner-stride but no outer-stride template void bug1453() { 
const int data[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; typedef Matrix RowMatrixXi; typedef Matrix ColMatrix23i; typedef Matrix ColMatrix32i; typedef Matrix RowMatrix23i; typedef Matrix RowMatrix32i; VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); } EIGEN_DECLARE_TEST(mapstride) 
{ for(int i = 0; i < g_repeat; i++) { int maxn = 3; CALL_SUBTEST_1( map_class_vector(Matrix()) ); CALL_SUBTEST_1( map_class_vector(Matrix()) ); CALL_SUBTEST_2( map_class_vector(Vector4d()) ); CALL_SUBTEST_2( map_class_vector(Vector4d()) ); CALL_SUBTEST_3( map_class_vector(RowVector4f()) ); CALL_SUBTEST_3( map_class_vector(RowVector4f()) ); CALL_SUBTEST_4( map_class_vector(VectorXcf(internal::random(1,maxn))) ); CALL_SUBTEST_4( map_class_vector(VectorXcf(internal::random(1,maxn))) ); CALL_SUBTEST_5( map_class_vector(VectorXi(internal::random(1,maxn))) ); CALL_SUBTEST_5( map_class_vector(VectorXi(internal::random(1,maxn))) ); CALL_SUBTEST_1( map_class_matrix(Matrix()) ); CALL_SUBTEST_1( map_class_matrix(Matrix()) ); CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); CALL_SUBTEST_3( map_class_matrix(Matrix()) ); CALL_SUBTEST_3( map_class_matrix(Matrix()) ); CALL_SUBTEST_3( map_class_matrix(Matrix()) ); CALL_SUBTEST_3( map_class_matrix(Matrix()) ); CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_6( map_class_matrix(MatrixXcd(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_6( map_class_matrix(MatrixXcd(internal::random(1,maxn),internal::random(1,maxn))) ); CALL_SUBTEST_5( bug1453<0>() ); TEST_SET_BUT_UNUSED_VARIABLE(maxn); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/meta.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template bool check_is_convertible(const From&, const To&) { return internal::is_convertible::value; } struct FooReturnType { typedef int ReturnType; }; struct MyInterface { virtual void func() = 0; virtual ~MyInterface() {} }; struct MyImpl : public MyInterface { void func() {} }; EIGEN_DECLARE_TEST(meta) { VERIFY((internal::conditional<(3<4),internal::true_type, internal::false_type>::type::value)); VERIFY(( internal::is_same::value)); VERIFY((!internal::is_same::value)); VERIFY((!internal::is_same::value)); VERIFY((!internal::is_same::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); // test add_const VERIFY(( internal::is_same< internal::add_const::type, const float >::value)); VERIFY(( internal::is_same< internal::add_const::type, float* const>::value)); VERIFY(( internal::is_same< internal::add_const::type, float const* const>::value)); VERIFY(( internal::is_same< internal::add_const::type, float& >::value)); // test remove_const VERIFY(( internal::is_same< internal::remove_const::type, float const* >::value)); VERIFY(( internal::is_same< internal::remove_const::type, float const* >::value)); VERIFY(( internal::is_same< internal::remove_const::type, float* >::value)); // test add_const_on_value_type VERIFY(( internal::is_same< internal::add_const_on_value_type::type, float const& >::value)); VERIFY(( internal::is_same< internal::add_const_on_value_type::type, float const* >::value)); VERIFY(( internal::is_same< 
internal::add_const_on_value_type::type, const float >::value)); VERIFY(( internal::is_same< internal::add_const_on_value_type::type, const float >::value)); VERIFY(( internal::is_same< internal::add_const_on_value_type::type, const float* const>::value)); VERIFY(( internal::is_same< internal::add_const_on_value_type::type, const float* const>::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); VERIFY(( internal::is_same::type >::value)); // is_convertible STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible >::value )); STATIC_CHECK((!internal::is_convertible,double>::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); STATIC_CHECK((!internal::is_convertible::value )); STATIC_CHECK((!internal::is_convertible::value )); STATIC_CHECK(!( internal::is_convertible::value )); STATIC_CHECK(!( internal::is_convertible::value )); STATIC_CHECK(( internal::is_convertible::value )); //STATIC_CHECK((!internal::is_convertible::value )); //does not even compile because the conversion is prevented by a static assertion STATIC_CHECK((!internal::is_convertible::value )); STATIC_CHECK((!internal::is_convertible::value )); { float f = 0.0f; MatrixXf A, B; VectorXf a, b; VERIFY(( check_is_convertible(a.dot(b), f) )); VERIFY(( check_is_convertible(a.transpose()*b, f) )); VERIFY((!check_is_convertible(A*B, f) )); VERIFY(( check_is_convertible(A*B, A) )); 
} #if (EIGEN_COMP_GNUC && EIGEN_COMP_GNUC <= 99) \ || (EIGEN_COMP_CLANG && EIGEN_COMP_CLANG <= 909) \ || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC <=1914) // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1752, // basically, a fix in the c++ standard breaks our c++98 implementation // of is_convertible for abstract classes. // So the following tests are expected to fail with recent compilers. STATIC_CHECK(( !internal::is_convertible::value )); #if (!EIGEN_COMP_GNUC_STRICT) || (EIGEN_GNUC_AT_LEAST(4,8)) // GCC prior to 4.8 fails to compile this test: // error: cannot allocate an object of abstract type 'MyInterface' // In other word, it does not obey SFINAE. // Nevertheless, we don't really care about supporting abstract type as scalar type! STATIC_CHECK(( !internal::is_convertible::value )); #endif STATIC_CHECK(( internal::is_convertible::value )); #endif { int i = 0; VERIFY(( check_is_convertible(fix<3>(), i) )); VERIFY((!check_is_convertible(i, fix()) )); } VERIFY(( internal::has_ReturnType::value )); VERIFY(( internal::has_ReturnType >::value )); VERIFY(( !internal::has_ReturnType::value )); VERIFY(( !internal::has_ReturnType::value )); VERIFY(internal::meta_sqrt<1>::ret == 1); #define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt::ret == int(std::sqrt(double(X)))) VERIFY_META_SQRT(2); VERIFY_META_SQRT(3); VERIFY_META_SQRT(4); VERIFY_META_SQRT(5); VERIFY_META_SQRT(6); VERIFY_META_SQRT(8); VERIFY_META_SQRT(9); VERIFY_META_SQRT(15); VERIFY_META_SQRT(16); VERIFY_META_SQRT(17); VERIFY_META_SQRT(255); VERIFY_META_SQRT(256); VERIFY_META_SQRT(257); VERIFY_META_SQRT(1023); VERIFY_META_SQRT(1024); VERIFY_META_SQRT(1025); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/metis_support.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "sparse_solver.h" #include #include #include template void test_metis_T() { SparseLU, MetisOrdering > sparselu_metis; check_sparse_square_solving(sparselu_metis); } EIGEN_DECLARE_TEST(metis_support) { CALL_SUBTEST_1(test_metis_T()); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/miscmatrices.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#include "main.h" template void miscMatrices(const MatrixType& m) { /* this test covers the following files: DiagonalMatrix.h Ones.h */ typedef typename MatrixType::Scalar Scalar; typedef Matrix VectorType; Index rows = m.rows(); Index cols = m.cols(); Index r = internal::random(0, rows-1), r2 = internal::random(0, rows-1), c = internal::random(0, cols-1); VERIFY_IS_APPROX(MatrixType::Ones(rows,cols)(r,c), static_cast(1)); MatrixType m1 = MatrixType::Ones(rows,cols); VERIFY_IS_APPROX(m1(r,c), static_cast(1)); VectorType v1 = VectorType::Random(rows); v1[0]; Matrix square(v1.asDiagonal()); if(r==r2) VERIFY_IS_APPROX(square(r,r2), v1[r]); else VERIFY_IS_MUCH_SMALLER_THAN(square(r,r2), static_cast(1)); square = MatrixType::Zero(rows, rows); square.diagonal() = VectorType::Ones(rows); VERIFY_IS_APPROX(square, MatrixType::Identity(rows, rows)); } EIGEN_DECLARE_TEST(miscmatrices) { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( miscMatrices(Matrix()) ); CALL_SUBTEST_2( miscMatrices(Matrix4d()) ); CALL_SUBTEST_3( miscMatrices(MatrixXcf(3, 3)) ); CALL_SUBTEST_4( miscMatrices(MatrixXi(8, 12)) ); CALL_SUBTEST_5( miscMatrices(MatrixXcd(20, 20)) ); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/mixingtypes.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2015 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #if defined(EIGEN_TEST_PART_7) // ignore double-promotion diagnostic for clang and gcc, if we check for static assertion anyway: // TODO do the same for MSVC? 
#if defined(__clang__) # if (__clang_major__ * 100 + __clang_minor__) >= 308 # pragma clang diagnostic ignored "-Wdouble-promotion" # endif #elif defined(__GNUC__) // TODO is there a minimal GCC version for this? At least g++-4.7 seems to be fine with this. # pragma GCC diagnostic ignored "-Wdouble-promotion" #endif #endif #if defined(EIGEN_TEST_PART_1) || defined(EIGEN_TEST_PART_2) || defined(EIGEN_TEST_PART_3) #ifndef EIGEN_DONT_VECTORIZE #define EIGEN_DONT_VECTORIZE #endif #endif static bool g_called; #define EIGEN_SCALAR_BINARY_OP_PLUGIN { g_called |= (!internal::is_same::value); } #include "main.h" using namespace std; #define VERIFY_MIX_SCALAR(XPR,REF) \ g_called = false; \ VERIFY_IS_APPROX(XPR,REF); \ VERIFY( g_called && #XPR" not properly optimized"); template void mixingtypes(int size = SizeAtCompileType) { typedef std::complex CF; typedef std::complex CD; typedef Matrix Mat_f; typedef Matrix Mat_d; typedef Matrix, SizeAtCompileType, SizeAtCompileType> Mat_cf; typedef Matrix, SizeAtCompileType, SizeAtCompileType> Mat_cd; typedef Matrix Vec_f; typedef Matrix Vec_d; typedef Matrix, SizeAtCompileType, 1> Vec_cf; typedef Matrix, SizeAtCompileType, 1> Vec_cd; Mat_f mf = Mat_f::Random(size,size); Mat_d md = mf.template cast(); //Mat_d rd = md; Mat_cf mcf = Mat_cf::Random(size,size); Mat_cd mcd = mcf.template cast >(); Mat_cd rcd = mcd; Vec_f vf = Vec_f::Random(size,1); Vec_d vd = vf.template cast(); Vec_cf vcf = Vec_cf::Random(size,1); Vec_cd vcd = vcf.template cast >(); float sf = internal::random(); double sd = internal::random(); complex scf = internal::random >(); complex scd = internal::random >(); mf+mf; float epsf = std::sqrt(std::numeric_limits ::min EIGEN_EMPTY ()); double epsd = std::sqrt(std::numeric_limits::min EIGEN_EMPTY ()); while(std::abs(sf )(); while(std::abs(sd )(); while(std::abs(scf)(); while(std::abs(scd)(); // check scalar products VERIFY_MIX_SCALAR(vcf * sf , vcf * complex(sf)); VERIFY_MIX_SCALAR(sd * vcd , complex(sd) * vcd); 
VERIFY_MIX_SCALAR(vf * scf , vf.template cast >() * scf); VERIFY_MIX_SCALAR(scd * vd , scd * vd.template cast >()); VERIFY_MIX_SCALAR(vcf * 2 , vcf * complex(2)); VERIFY_MIX_SCALAR(vcf * 2.1 , vcf * complex(2.1)); VERIFY_MIX_SCALAR(2 * vcf, vcf * complex(2)); VERIFY_MIX_SCALAR(2.1 * vcf , vcf * complex(2.1)); // check scalar quotients VERIFY_MIX_SCALAR(vcf / sf , vcf / complex(sf)); VERIFY_MIX_SCALAR(vf / scf , vf.template cast >() / scf); VERIFY_MIX_SCALAR(vf.array() / scf, vf.template cast >().array() / scf); VERIFY_MIX_SCALAR(scd / vd.array() , scd / vd.template cast >().array()); // check scalar increment VERIFY_MIX_SCALAR(vcf.array() + sf , vcf.array() + complex(sf)); VERIFY_MIX_SCALAR(sd + vcd.array(), complex(sd) + vcd.array()); VERIFY_MIX_SCALAR(vf.array() + scf, vf.template cast >().array() + scf); VERIFY_MIX_SCALAR(scd + vd.array() , scd + vd.template cast >().array()); // check scalar subtractions VERIFY_MIX_SCALAR(vcf.array() - sf , vcf.array() - complex(sf)); VERIFY_MIX_SCALAR(sd - vcd.array(), complex(sd) - vcd.array()); VERIFY_MIX_SCALAR(vf.array() - scf, vf.template cast >().array() - scf); VERIFY_MIX_SCALAR(scd - vd.array() , scd - vd.template cast >().array()); // check scalar powers VERIFY_MIX_SCALAR( pow(vcf.array(), sf), Eigen::pow(vcf.array(), complex(sf)) ); VERIFY_MIX_SCALAR( vcf.array().pow(sf) , Eigen::pow(vcf.array(), complex(sf)) ); VERIFY_MIX_SCALAR( pow(sd, vcd.array()), Eigen::pow(complex(sd), vcd.array()) ); VERIFY_MIX_SCALAR( Eigen::pow(vf.array(), scf), Eigen::pow(vf.template cast >().array(), scf) ); VERIFY_MIX_SCALAR( vf.array().pow(scf) , Eigen::pow(vf.template cast >().array(), scf) ); VERIFY_MIX_SCALAR( Eigen::pow(scd, vd.array()), Eigen::pow(scd, vd.template cast >().array()) ); // check dot product vf.dot(vf); VERIFY_IS_APPROX(vcf.dot(vf), vcf.dot(vf.template cast >())); // check diagonal product VERIFY_IS_APPROX(vf.asDiagonal() * mcf, vf.template cast >().asDiagonal() * mcf); VERIFY_IS_APPROX(vcd.asDiagonal() * md, 
vcd.asDiagonal() * md.template cast >()); VERIFY_IS_APPROX(mcf * vf.asDiagonal(), mcf * vf.template cast >().asDiagonal()); VERIFY_IS_APPROX(md * vcd.asDiagonal(), md.template cast >() * vcd.asDiagonal()); // check inner product VERIFY_IS_APPROX((vf.transpose() * vcf).value(), (vf.template cast >().transpose() * vcf).value()); // check outer product VERIFY_IS_APPROX((vf * vcf.transpose()).eval(), (vf.template cast >() * vcf.transpose()).eval()); // coeff wise product VERIFY_IS_APPROX((vf * vcf.transpose()).eval(), (vf.template cast >() * vcf.transpose()).eval()); Mat_cd mcd2 = mcd; VERIFY_IS_APPROX(mcd.array() *= md.array(), mcd2.array() *= md.array().template cast >()); // check matrix-matrix products VERIFY_IS_APPROX(sd*md*mcd, (sd*md).template cast().eval()*mcd); VERIFY_IS_APPROX(sd*mcd*md, sd*mcd*md.template cast()); VERIFY_IS_APPROX(scd*md*mcd, scd*md.template cast().eval()*mcd); VERIFY_IS_APPROX(scd*mcd*md, scd*mcd*md.template cast()); VERIFY_IS_APPROX(sf*mf*mcf, sf*mf.template cast()*mcf); VERIFY_IS_APPROX(sf*mcf*mf, sf*mcf*mf.template cast()); VERIFY_IS_APPROX(scf*mf*mcf, scf*mf.template cast()*mcf); VERIFY_IS_APPROX(scf*mcf*mf, scf*mcf*mf.template cast()); VERIFY_IS_APPROX(sd*md.adjoint()*mcd, (sd*md).template cast().eval().adjoint()*mcd); VERIFY_IS_APPROX(sd*mcd.adjoint()*md, sd*mcd.adjoint()*md.template cast()); VERIFY_IS_APPROX(sd*md.adjoint()*mcd.adjoint(), (sd*md).template cast().eval().adjoint()*mcd.adjoint()); VERIFY_IS_APPROX(sd*mcd.adjoint()*md.adjoint(), sd*mcd.adjoint()*md.template cast().adjoint()); VERIFY_IS_APPROX(sd*md*mcd.adjoint(), (sd*md).template cast().eval()*mcd.adjoint()); VERIFY_IS_APPROX(sd*mcd*md.adjoint(), sd*mcd*md.template cast().adjoint()); VERIFY_IS_APPROX(sf*mf.adjoint()*mcf, (sf*mf).template cast().eval().adjoint()*mcf); VERIFY_IS_APPROX(sf*mcf.adjoint()*mf, sf*mcf.adjoint()*mf.template cast()); VERIFY_IS_APPROX(sf*mf.adjoint()*mcf.adjoint(), (sf*mf).template cast().eval().adjoint()*mcf.adjoint()); 
VERIFY_IS_APPROX(sf*mcf.adjoint()*mf.adjoint(), sf*mcf.adjoint()*mf.template cast().adjoint()); VERIFY_IS_APPROX(sf*mf*mcf.adjoint(), (sf*mf).template cast().eval()*mcf.adjoint()); VERIFY_IS_APPROX(sf*mcf*mf.adjoint(), sf*mcf*mf.template cast().adjoint()); VERIFY_IS_APPROX(sf*mf*vcf, (sf*mf).template cast().eval()*vcf); VERIFY_IS_APPROX(scf*mf*vcf,(scf*mf.template cast()).eval()*vcf); VERIFY_IS_APPROX(sf*mcf*vf, sf*mcf*vf.template cast()); VERIFY_IS_APPROX(scf*mcf*vf,scf*mcf*vf.template cast()); VERIFY_IS_APPROX(sf*vcf.adjoint()*mf, sf*vcf.adjoint()*mf.template cast().eval()); VERIFY_IS_APPROX(scf*vcf.adjoint()*mf, scf*vcf.adjoint()*mf.template cast().eval()); VERIFY_IS_APPROX(sf*vf.adjoint()*mcf, sf*vf.adjoint().template cast().eval()*mcf); VERIFY_IS_APPROX(scf*vf.adjoint()*mcf, scf*vf.adjoint().template cast().eval()*mcf); VERIFY_IS_APPROX(sd*md*vcd, (sd*md).template cast().eval()*vcd); VERIFY_IS_APPROX(scd*md*vcd,(scd*md.template cast()).eval()*vcd); VERIFY_IS_APPROX(sd*mcd*vd, sd*mcd*vd.template cast().eval()); VERIFY_IS_APPROX(scd*mcd*vd,scd*mcd*vd.template cast().eval()); VERIFY_IS_APPROX(sd*vcd.adjoint()*md, sd*vcd.adjoint()*md.template cast().eval()); VERIFY_IS_APPROX(scd*vcd.adjoint()*md, scd*vcd.adjoint()*md.template cast().eval()); VERIFY_IS_APPROX(sd*vd.adjoint()*mcd, sd*vd.adjoint().template cast().eval()*mcd); VERIFY_IS_APPROX(scd*vd.adjoint()*mcd, scd*vd.adjoint().template cast().eval()*mcd); VERIFY_IS_APPROX( sd*vcd.adjoint()*md.template triangularView(), sd*vcd.adjoint()*md.template cast().eval().template triangularView()); VERIFY_IS_APPROX(scd*vcd.adjoint()*md.template triangularView(), scd*vcd.adjoint()*md.template cast().eval().template triangularView()); VERIFY_IS_APPROX( sd*vcd.adjoint()*md.transpose().template triangularView(), sd*vcd.adjoint()*md.transpose().template cast().eval().template triangularView()); VERIFY_IS_APPROX(scd*vcd.adjoint()*md.transpose().template triangularView(), scd*vcd.adjoint()*md.transpose().template 
cast().eval().template triangularView()); VERIFY_IS_APPROX( sd*vd.adjoint()*mcd.template triangularView(), sd*vd.adjoint().template cast().eval()*mcd.template triangularView()); VERIFY_IS_APPROX(scd*vd.adjoint()*mcd.template triangularView(), scd*vd.adjoint().template cast().eval()*mcd.template triangularView()); VERIFY_IS_APPROX( sd*vd.adjoint()*mcd.transpose().template triangularView(), sd*vd.adjoint().template cast().eval()*mcd.transpose().template triangularView()); VERIFY_IS_APPROX(scd*vd.adjoint()*mcd.transpose().template triangularView(), scd*vd.adjoint().template cast().eval()*mcd.transpose().template triangularView()); // Not supported yet: trmm // VERIFY_IS_APPROX(sd*mcd*md.template triangularView(), sd*mcd*md.template cast().eval().template triangularView()); // VERIFY_IS_APPROX(scd*mcd*md.template triangularView(), scd*mcd*md.template cast().eval().template triangularView()); // VERIFY_IS_APPROX(sd*md*mcd.template triangularView(), sd*md.template cast().eval()*mcd.template triangularView()); // VERIFY_IS_APPROX(scd*md*mcd.template triangularView(), scd*md.template cast().eval()*mcd.template triangularView()); // Not supported yet: symv // VERIFY_IS_APPROX(sd*vcd.adjoint()*md.template selfadjointView(), sd*vcd.adjoint()*md.template cast().eval().template selfadjointView()); // VERIFY_IS_APPROX(scd*vcd.adjoint()*md.template selfadjointView(), scd*vcd.adjoint()*md.template cast().eval().template selfadjointView()); // VERIFY_IS_APPROX(sd*vd.adjoint()*mcd.template selfadjointView(), sd*vd.adjoint().template cast().eval()*mcd.template selfadjointView()); // VERIFY_IS_APPROX(scd*vd.adjoint()*mcd.template selfadjointView(), scd*vd.adjoint().template cast().eval()*mcd.template selfadjointView()); // Not supported yet: symm // VERIFY_IS_APPROX(sd*vcd.adjoint()*md.template selfadjointView(), sd*vcd.adjoint()*md.template cast().eval().template selfadjointView()); // VERIFY_IS_APPROX(scd*vcd.adjoint()*md.template selfadjointView(), scd*vcd.adjoint()*md.template 
cast().eval().template selfadjointView()); // VERIFY_IS_APPROX(sd*vd.adjoint()*mcd.template selfadjointView(), sd*vd.adjoint().template cast().eval()*mcd.template selfadjointView()); // VERIFY_IS_APPROX(scd*vd.adjoint()*mcd.template selfadjointView(), scd*vd.adjoint().template cast().eval()*mcd.template selfadjointView()); rcd.setZero(); VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView() = sd * mcd * md), Mat_cd((sd * mcd * md.template cast().eval()).template triangularView())); VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView() = sd * md * mcd), Mat_cd((sd * md.template cast().eval() * mcd).template triangularView())); VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView() = scd * mcd * md), Mat_cd((scd * mcd * md.template cast().eval()).template triangularView())); VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView() = scd * md * mcd), Mat_cd((scd * md.template cast().eval() * mcd).template triangularView())); VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast().eval().array() * mcd.array() ); VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast().eval().array() ); VERIFY_IS_APPROX( md.array() + mcd.array(), md.template cast().eval().array() + mcd.array() ); VERIFY_IS_APPROX( mcd.array() + md.array(), mcd.array() + md.template cast().eval().array() ); VERIFY_IS_APPROX( md.array() - mcd.array(), md.template cast().eval().array() - mcd.array() ); VERIFY_IS_APPROX( mcd.array() - md.array(), mcd.array() - md.template cast().eval().array() ); if(mcd.array().abs().minCoeff()>epsd) { VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast().eval().array() / mcd.array() ); } if(md.array().abs().minCoeff()>epsd) { VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast().eval().array() ); } if(md.array().abs().minCoeff()>epsd || mcd.array().abs().minCoeff()>epsd) { VERIFY_IS_APPROX( md.array().pow(mcd.array()), md.template cast().eval().array().pow(mcd.array()) ); VERIFY_IS_APPROX( mcd.array().pow(md.array()), 
mcd.array().pow(md.template cast().eval().array()) ); VERIFY_IS_APPROX( pow(md.array(),mcd.array()), md.template cast().eval().array().pow(mcd.array()) ); VERIFY_IS_APPROX( pow(mcd.array(),md.array()), mcd.array().pow(md.template cast().eval().array()) ); } rcd = mcd; VERIFY_IS_APPROX( rcd = md, md.template cast().eval() ); rcd = mcd; VERIFY_IS_APPROX( rcd += md, mcd + md.template cast().eval() ); rcd = mcd; VERIFY_IS_APPROX( rcd -= md, mcd - md.template cast().eval() ); rcd = mcd; VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast().eval().array() ); rcd = mcd; if(md.array().abs().minCoeff()>epsd) { VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast().eval().array() ); } rcd = mcd; VERIFY_IS_APPROX( rcd.noalias() += md + mcd*md, mcd + (md.template cast().eval()) + mcd*(md.template cast().eval())); VERIFY_IS_APPROX( rcd.noalias() = md*md, ((md*md).eval().template cast()) ); rcd = mcd; VERIFY_IS_APPROX( rcd.noalias() += md*md, mcd + ((md*md).eval().template cast()) ); rcd = mcd; VERIFY_IS_APPROX( rcd.noalias() -= md*md, mcd - ((md*md).eval().template cast()) ); VERIFY_IS_APPROX( rcd.noalias() = mcd + md*md, mcd + ((md*md).eval().template cast()) ); rcd = mcd; VERIFY_IS_APPROX( rcd.noalias() += mcd + md*md, mcd + mcd + ((md*md).eval().template cast()) ); rcd = mcd; VERIFY_IS_APPROX( rcd.noalias() -= mcd + md*md, - ((md*md).eval().template cast()) ); } EIGEN_DECLARE_TEST(mixingtypes) { g_called = false; // Silence -Wunneeded-internal-declaration. 
for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(mixingtypes<3>()); CALL_SUBTEST_2(mixingtypes<4>()); CALL_SUBTEST_3(mixingtypes(internal::random(1,EIGEN_TEST_MAX_SIZE))); CALL_SUBTEST_4(mixingtypes<3>()); CALL_SUBTEST_5(mixingtypes<4>()); CALL_SUBTEST_6(mixingtypes(internal::random(1,EIGEN_TEST_MAX_SIZE))); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/mpl2only.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MPL2_ONLY #define EIGEN_MPL2_ONLY #endif #include #include #include #include #include #include #include int main() { return 0; } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/nestbyvalue.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2019 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#define TEST_ENABLE_TEMPORARY_TRACKING #include "main.h" typedef NestByValue CpyMatrixXd; typedef CwiseBinaryOp,const CpyMatrixXd,const CpyMatrixXd> XprType; XprType get_xpr_with_temps(const MatrixXd& a) { MatrixXd t1 = a.rowwise().reverse(); MatrixXd t2 = a+a; return t1.nestByValue() + t2.nestByValue(); } EIGEN_DECLARE_TEST(nestbyvalue) { for(int i = 0; i < g_repeat; i++) { Index rows = internal::random(1,EIGEN_TEST_MAX_SIZE); Index cols = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixXd a = MatrixXd(rows,cols); nb_temporaries = 0; XprType x = get_xpr_with_temps(a); VERIFY_IS_EQUAL(nb_temporaries,6); MatrixXd b = x; VERIFY_IS_EQUAL(nb_temporaries,6+1); VERIFY_IS_APPROX(b, a.rowwise().reverse().eval() + (a+a).eval()); } } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/nesting_ops.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Hauke Heibel // Copyright (C) 2015 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define TEST_ENABLE_TEMPORARY_TRACKING #include "main.h" template void use_n_times(const XprType &xpr) { typename internal::nested_eval::type mat(xpr); typename XprType::PlainObject res(mat.rows(), mat.cols()); nb_temporaries--; // remove res res.setZero(); for(int i=0; i bool verify_eval_type(const XprType &, const ReferenceType&) { typedef typename internal::nested_eval::type EvalType; return internal::is_same::type, typename internal::remove_all::type>::value; } template void run_nesting_ops_1(const MatrixType& _m) { typename internal::nested_eval::type m(_m); // Make really sure that we are in debug mode! 
VERIFY_RAISES_ASSERT(eigen_assert(false)); // The only intention of these tests is to ensure that this code does // not trigger any asserts or segmentation faults... more to come. VERIFY_IS_APPROX( (m.transpose() * m).diagonal().sum(), (m.transpose() * m).diagonal().sum() ); VERIFY_IS_APPROX( (m.transpose() * m).diagonal().array().abs().sum(), (m.transpose() * m).diagonal().array().abs().sum() ); VERIFY_IS_APPROX( (m.transpose() * m).array().abs().sum(), (m.transpose() * m).array().abs().sum() ); } template void run_nesting_ops_2(const MatrixType& _m) { typedef typename MatrixType::Scalar Scalar; Index rows = _m.rows(); Index cols = _m.cols(); MatrixType m1 = MatrixType::Random(rows,cols); Matrix m2; if((MatrixType::SizeAtCompileTime==Dynamic)) { VERIFY_EVALUATION_COUNT( use_n_times<1>(m1 + m1*m1), 1 ); VERIFY_EVALUATION_COUNT( use_n_times<10>(m1 + m1*m1), 1 ); VERIFY_EVALUATION_COUNT( use_n_times<1>(m1.template triangularView().solve(m1.col(0))), 1 ); VERIFY_EVALUATION_COUNT( use_n_times<10>(m1.template triangularView().solve(m1.col(0))), 1 ); VERIFY_EVALUATION_COUNT( use_n_times<1>(Scalar(2)*m1.template triangularView().solve(m1.col(0))), 2 ); // FIXME could be one by applying the scaling in-place on the solve result VERIFY_EVALUATION_COUNT( use_n_times<1>(m1.col(0)+m1.template triangularView().solve(m1.col(0))), 2 ); // FIXME could be one by adding m1.col() inplace VERIFY_EVALUATION_COUNT( use_n_times<10>(m1.col(0)+m1.template triangularView().solve(m1.col(0))), 2 ); } { VERIFY( verify_eval_type<10>(m1, m1) ); if(!NumTraits::IsComplex) { VERIFY( verify_eval_type<3>(2*m1, 2*m1) ); VERIFY( verify_eval_type<4>(2*m1, m1) ); } else { VERIFY( verify_eval_type<2>(2*m1, 2*m1) ); VERIFY( verify_eval_type<3>(2*m1, m1) ); } VERIFY( verify_eval_type<2>(m1+m1, m1+m1) ); VERIFY( verify_eval_type<3>(m1+m1, m1) ); VERIFY( verify_eval_type<1>(m1*m1.transpose(), m2) ); VERIFY( verify_eval_type<1>(m1*(m1+m1).transpose(), m2) ); VERIFY( verify_eval_type<2>(m1*m1.transpose(), m2) ); 
VERIFY( verify_eval_type<1>(m1+m1*m1, m1) ); VERIFY( verify_eval_type<1>(m1.template triangularView().solve(m1), m1) ); VERIFY( verify_eval_type<1>(m1+m1.template triangularView().solve(m1), m1) ); } } EIGEN_DECLARE_TEST(nesting_ops) { CALL_SUBTEST_1(run_nesting_ops_1(MatrixXf::Random(25,25))); CALL_SUBTEST_2(run_nesting_ops_1(MatrixXcd::Random(25,25))); CALL_SUBTEST_3(run_nesting_ops_1(Matrix4f::Random())); CALL_SUBTEST_4(run_nesting_ops_1(Matrix2d::Random())); Index s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_1( run_nesting_ops_2(MatrixXf(s,s)) ); CALL_SUBTEST_2( run_nesting_ops_2(MatrixXcd(s,s)) ); CALL_SUBTEST_3( run_nesting_ops_2(Matrix4f()) ); CALL_SUBTEST_4( run_nesting_ops_2(Matrix2d()) ); TEST_SET_BUT_UNUSED_VARIABLE(s) } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/nomalloc.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
// discard stack allocation as that too bypasses malloc #define EIGEN_STACK_ALLOCATION_LIMIT 0 // heap allocation will raise an assert if enabled at runtime #define EIGEN_RUNTIME_NO_MALLOC #include "main.h" #include #include #include #include #include template void nomalloc(const MatrixType& m) { /* this test check no dynamic memory allocation are issued with fixed-size matrices */ typedef typename MatrixType::Scalar Scalar; Index rows = m.rows(); Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols); Scalar s1 = internal::random(); Index r = internal::random(0, rows-1), c = internal::random(0, cols-1); VERIFY_IS_APPROX((m1+m2)*s1, s1*m1+s1*m2); VERIFY_IS_APPROX((m1+m2)(r,c), (m1(r,c))+(m2(r,c))); VERIFY_IS_APPROX(m1.cwiseProduct(m1.block(0,0,rows,cols)), (m1.array()*m1.array()).matrix()); VERIFY_IS_APPROX((m1*m1.transpose())*m2, m1*(m1.transpose()*m2)); m2.col(0).noalias() = m1 * m1.col(0); m2.col(0).noalias() -= m1.adjoint() * m1.col(0); m2.col(0).noalias() -= m1 * m1.row(0).adjoint(); m2.col(0).noalias() -= m1.adjoint() * m1.row(0).adjoint(); m2.row(0).noalias() = m1.row(0) * m1; m2.row(0).noalias() -= m1.row(0) * m1.adjoint(); m2.row(0).noalias() -= m1.col(0).adjoint() * m1; m2.row(0).noalias() -= m1.col(0).adjoint() * m1.adjoint(); VERIFY_IS_APPROX(m2,m2); m2.col(0).noalias() = m1.template triangularView() * m1.col(0); m2.col(0).noalias() -= m1.adjoint().template triangularView() * m1.col(0); m2.col(0).noalias() -= m1.template triangularView() * m1.row(0).adjoint(); m2.col(0).noalias() -= m1.adjoint().template triangularView() * m1.row(0).adjoint(); m2.row(0).noalias() = m1.row(0) * m1.template triangularView(); m2.row(0).noalias() -= m1.row(0) * m1.adjoint().template triangularView(); m2.row(0).noalias() -= m1.col(0).adjoint() * m1.template triangularView(); m2.row(0).noalias() -= m1.col(0).adjoint() * m1.adjoint().template triangularView(); VERIFY_IS_APPROX(m2,m2); m2.col(0).noalias() = 
m1.template selfadjointView() * m1.col(0); m2.col(0).noalias() -= m1.adjoint().template selfadjointView() * m1.col(0); m2.col(0).noalias() -= m1.template selfadjointView() * m1.row(0).adjoint(); m2.col(0).noalias() -= m1.adjoint().template selfadjointView() * m1.row(0).adjoint(); m2.row(0).noalias() = m1.row(0) * m1.template selfadjointView(); m2.row(0).noalias() -= m1.row(0) * m1.adjoint().template selfadjointView(); m2.row(0).noalias() -= m1.col(0).adjoint() * m1.template selfadjointView(); m2.row(0).noalias() -= m1.col(0).adjoint() * m1.adjoint().template selfadjointView(); VERIFY_IS_APPROX(m2,m2); m2.template selfadjointView().rankUpdate(m1.col(0),-1); m2.template selfadjointView().rankUpdate(m1.row(0),-1); m2.template selfadjointView().rankUpdate(m1.col(0), m1.col(0)); // rank-2 // The following fancy matrix-matrix products are not safe yet regarding static allocation m2.template selfadjointView().rankUpdate(m1); m2 += m2.template triangularView() * m1; m2.template triangularView() = m2 * m2; m1 += m1.template selfadjointView() * m2; VERIFY_IS_APPROX(m2,m2); } template void ctms_decompositions() { const int maxSize = 16; const int size = 12; typedef Eigen::Matrix Matrix; typedef Eigen::Matrix Vector; typedef Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic, 0, maxSize, maxSize> ComplexMatrix; const Matrix A(Matrix::Random(size, size)), B(Matrix::Random(size, size)); Matrix X(size,size); const ComplexMatrix complexA(ComplexMatrix::Random(size, size)); const Matrix saA = A.adjoint() * A; const Vector b(Vector::Random(size)); Vector x(size); // Cholesky module Eigen::LLT LLT; LLT.compute(A); X = LLT.solve(B); x = LLT.solve(b); Eigen::LDLT LDLT; LDLT.compute(A); X = LDLT.solve(B); x = LDLT.solve(b); // Eigenvalues module Eigen::HessenbergDecomposition hessDecomp; hessDecomp.compute(complexA); Eigen::ComplexSchur cSchur(size); cSchur.compute(complexA); Eigen::ComplexEigenSolver cEigSolver; cEigSolver.compute(complexA); Eigen::EigenSolver eigSolver; 
eigSolver.compute(A); Eigen::SelfAdjointEigenSolver saEigSolver(size); saEigSolver.compute(saA); Eigen::Tridiagonalization tridiag; tridiag.compute(saA); // LU module Eigen::PartialPivLU ppLU; ppLU.compute(A); X = ppLU.solve(B); x = ppLU.solve(b); Eigen::FullPivLU fpLU; fpLU.compute(A); X = fpLU.solve(B); x = fpLU.solve(b); // QR module Eigen::HouseholderQR hQR; hQR.compute(A); X = hQR.solve(B); x = hQR.solve(b); Eigen::ColPivHouseholderQR cpQR; cpQR.compute(A); X = cpQR.solve(B); x = cpQR.solve(b); Eigen::FullPivHouseholderQR fpQR; fpQR.compute(A); // FIXME X = fpQR.solve(B); x = fpQR.solve(b); // SVD module Eigen::JacobiSVD jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); } void test_zerosized() { // default constructors: Eigen::MatrixXd A; Eigen::VectorXd v; // explicit zero-sized: Eigen::ArrayXXd A0(0,0); Eigen::ArrayXd v0(0); // assigning empty objects to each other: A=A0; v=v0; } template void test_reference(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; enum { Flag = MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor}; enum { TransposeFlag = !MatrixType::IsRowMajor ? 
Eigen::RowMajor : Eigen::ColMajor}; Index rows = m.rows(), cols=m.cols(); typedef Eigen::Matrix MatrixX; typedef Eigen::Matrix MatrixXT; // Dynamic reference: typedef Eigen::Ref Ref; typedef Eigen::Ref RefT; Ref r1(m); Ref r2(m.block(rows/3, cols/4, rows/2, cols/2)); RefT r3(m.transpose()); RefT r4(m.topLeftCorner(rows/2, cols/2).transpose()); VERIFY_RAISES_ASSERT(RefT r5(m)); VERIFY_RAISES_ASSERT(Ref r6(m.transpose())); VERIFY_RAISES_ASSERT(Ref r7(Scalar(2) * m)); // Copy constructors shall also never malloc Ref r8 = r1; RefT r9 = r3; // Initializing from a compatible Ref shall also never malloc Eigen::Ref > r10=r8, r11=m; // Initializing from an incompatible Ref will malloc: typedef Eigen::Ref RefAligned; VERIFY_RAISES_ASSERT(RefAligned r12=r10); VERIFY_RAISES_ASSERT(Ref r13=r10); // r10 has more dynamic strides } EIGEN_DECLARE_TEST(nomalloc) { // create some dynamic objects Eigen::MatrixXd M1 = MatrixXd::Random(3,3); Ref R1 = 2.0*M1; // Ref requires temporary // from here on prohibit malloc: Eigen::internal::set_is_malloc_allowed(false); // check that our operator new is indeed called: VERIFY_RAISES_ASSERT(MatrixXd dummy(MatrixXd::Random(3,3))); CALL_SUBTEST_1(nomalloc(Matrix()) ); CALL_SUBTEST_2(nomalloc(Matrix4d()) ); CALL_SUBTEST_3(nomalloc(Matrix()) ); // Check decomposition modules with dynamic matrices that have a known compile-time max size (ctms) CALL_SUBTEST_4(ctms_decompositions()); CALL_SUBTEST_5(test_zerosized()); CALL_SUBTEST_6(test_reference(Matrix())); CALL_SUBTEST_7(test_reference(R1)); CALL_SUBTEST_8(Ref R2 = M1.topRows<2>(); test_reference(R2)); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/nullary.cpp ================================================ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2010-2011 Jitse Niesen // Copyright (C) 2016 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" template bool equalsIdentity(const MatrixType& A) { typedef typename MatrixType::Scalar Scalar; Scalar zero = static_cast(0); bool offDiagOK = true; for (Index i = 0; i < A.rows(); ++i) { for (Index j = i+1; j < A.cols(); ++j) { offDiagOK = offDiagOK && (A(i,j) == zero); } } for (Index i = 0; i < A.rows(); ++i) { for (Index j = 0; j < (std::min)(i, A.cols()); ++j) { offDiagOK = offDiagOK && (A(i,j) == zero); } } bool diagOK = (A.diagonal().array() == 1).all(); return offDiagOK && diagOK; } template void check_extremity_accuracy(const VectorType &v, const typename VectorType::Scalar &low, const typename VectorType::Scalar &high) { typedef typename VectorType::Scalar Scalar; typedef typename VectorType::RealScalar RealScalar; RealScalar prec = internal::is_same::value ? NumTraits::dummy_precision()*10 : NumTraits::dummy_precision()/10; Index size = v.size(); if(size<20) return; for (int i=0; isize-6) { Scalar ref = (low*RealScalar(size-i-1))/RealScalar(size-1) + (high*RealScalar(i))/RealScalar(size-1); if(std::abs(ref)>1) { if(!internal::isApprox(v(i), ref, prec)) std::cout << v(i) << " != " << ref << " ; relative error: " << std::abs((v(i)-ref)/ref) << " ; required precision: " << prec << " ; range: " << low << "," << high << " ; i: " << i << "\n"; VERIFY(internal::isApprox(v(i), (low*RealScalar(size-i-1))/RealScalar(size-1) + (high*RealScalar(i))/RealScalar(size-1), prec)); } } } } template void testVectorType(const VectorType& base) { typedef typename VectorType::Scalar Scalar; typedef typename VectorType::RealScalar RealScalar; const Index size = base.size(); Scalar high = internal::random(-500,500); Scalar low = (size == 1 ? 
high : internal::random(-500,500)); if (numext::real(low)>numext::real(high)) std::swap(low,high); // check low==high if(internal::random(0.f,1.f)<0.05f) low = high; // check abs(low) >> abs(high) else if(size>2 && std::numeric_limits::max_exponent10>0 && internal::random(0.f,1.f)<0.1f) low = -internal::random(1,2) * RealScalar(std::pow(RealScalar(10),std::numeric_limits::max_exponent10/2)); const Scalar step = ((size == 1) ? 1 : (high-low)/RealScalar(size-1)); // check whether the result yields what we expect it to do VectorType m(base); m.setLinSpaced(size,low,high); if(!NumTraits::IsInteger) { VectorType n(size); for (int i=0; i::IsInteger) || (range_length>=size && (Index(range_length)%(size-1))==0) || (Index(range_length+1)::IsInteger) || (range_length>=size)) for (int i=0; i::IsInteger) CALL_SUBTEST( check_extremity_accuracy(m, low, high) ); } VERIFY( numext::real(m(m.size()-1)) <= numext::real(high) ); VERIFY( (m.array().real() <= numext::real(high)).all() ); VERIFY( (m.array().real() >= numext::real(low)).all() ); VERIFY( numext::real(m(m.size()-1)) >= numext::real(low) ); if(size>=1) { VERIFY( internal::isApprox(m(0),low) ); VERIFY_IS_EQUAL(m(0) , low); } // check whether everything works with row and col major vectors Matrix row_vector(size); Matrix col_vector(size); row_vector.setLinSpaced(size,low,high); col_vector.setLinSpaced(size,low,high); // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit // when computing the squared sum in isApprox, thus the 2x factor. 
VERIFY( row_vector.isApprox(col_vector.transpose(), RealScalar(2)*NumTraits::epsilon())); Matrix size_changer(size+50); size_changer.setLinSpaced(size,low,high); VERIFY( size_changer.size() == size ); typedef Matrix ScalarMatrix; ScalarMatrix scalar; scalar.setLinSpaced(1,low,high); VERIFY_IS_APPROX( scalar, ScalarMatrix::Constant(high) ); VERIFY_IS_APPROX( ScalarMatrix::LinSpaced(1,low,high), ScalarMatrix::Constant(high) ); // regression test for bug 526 (linear vectorized transversal) if (size > 1 && (!NumTraits::IsInteger)) { m.tail(size-1).setLinSpaced(low, high); VERIFY_IS_APPROX(m(size-1), high); } // regression test for bug 1383 (LinSpaced with empty size/range) { Index n0 = VectorType::SizeAtCompileTime==Dynamic ? 0 : VectorType::SizeAtCompileTime; low = internal::random(); m = VectorType::LinSpaced(n0,low,low-RealScalar(1)); VERIFY(m.size()==n0); if(VectorType::SizeAtCompileTime==Dynamic) { VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,0,Scalar(n0-1)).sum(),Scalar(0)); VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-RealScalar(1)).sum(),Scalar(0)); } m.setLinSpaced(n0,0,Scalar(n0-1)); VERIFY(m.size()==n0); m.setLinSpaced(n0,low,low-RealScalar(1)); VERIFY(m.size()==n0); // empty range only: VERIFY_IS_APPROX(VectorType::LinSpaced(size,low,low),VectorType::Constant(size,low)); m.setLinSpaced(size,low,low); VERIFY_IS_APPROX(m,VectorType::Constant(size,low)); if(NumTraits::IsInteger) { VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar(size-1)), VectorType::LinSpaced(size,low+Scalar(size-1),low).reverse() ); if(VectorType::SizeAtCompileTime==Dynamic) { // Check negative multiplicator path: for(Index k=1; k<5; ++k) VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar((size-1)*k)), VectorType::LinSpaced(size,low+Scalar((size-1)*k),low).reverse() ); // Check negative divisor path: for(Index k=1; k<5; ++k) VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,low+Scalar(size-1)), VectorType::LinSpaced(size*k,low+Scalar(size-1),low).reverse() ); } } } // 
test setUnit() if(m.size()>0) { for(Index k=0; k<10; ++k) { Index i = internal::random(0,m.size()-1); m.setUnit(i); VERIFY_IS_APPROX( m, VectorType::Unit(m.size(), i) ); } if(VectorType::SizeAtCompileTime==Dynamic) { Index i = internal::random(0,2*m.size()-1); m.setUnit(2*m.size(),i); VERIFY_IS_APPROX( m, VectorType::Unit(m.size(),i) ); } } } template void testMatrixType(const MatrixType& m) { using std::abs; const Index rows = m.rows(); const Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; Scalar s1; do { s1 = internal::random(); } while(abs(s1)::IsInteger)); MatrixType A; A.setIdentity(rows, cols); VERIFY(equalsIdentity(A)); VERIFY(equalsIdentity(MatrixType::Identity(rows, cols))); A = MatrixType::Constant(rows,cols,s1); Index i = internal::random(0,rows-1); Index j = internal::random(0,cols-1); VERIFY_IS_APPROX( MatrixType::Constant(rows,cols,s1)(i,j), s1 ); VERIFY_IS_APPROX( MatrixType::Constant(rows,cols,s1).coeff(i,j), s1 ); VERIFY_IS_APPROX( A(i,j), s1 ); } template void bug79() { // Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79). 
VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits::epsilon() ); } template void bug1630() { Array4d x4 = Array4d::LinSpaced(0.0, 1.0); Array3d x3(Array4d::LinSpaced(0.0, 1.0).head(3)); VERIFY_IS_APPROX(x4.head(3), x3); } template void nullary_overflow() { // Check possible overflow issue int n = 60000; ArrayXi a1(n), a2(n); a1.setLinSpaced(n, 0, n-1); for(int i=0; i void nullary_internal_logic() { // check some internal logic VERIFY(( internal::has_nullary_operator >::value )); VERIFY(( !internal::has_unary_operator >::value )); VERIFY(( !internal::has_binary_operator >::value )); VERIFY(( internal::functor_has_linear_access >::ret )); VERIFY(( !internal::has_nullary_operator >::value )); VERIFY(( !internal::has_unary_operator >::value )); VERIFY(( internal::has_binary_operator >::value )); VERIFY(( !internal::functor_has_linear_access >::ret )); VERIFY(( !internal::has_nullary_operator >::value )); VERIFY(( internal::has_unary_operator >::value )); VERIFY(( !internal::has_binary_operator >::value )); VERIFY(( internal::functor_has_linear_access >::ret )); // Regression unit test for a weird MSVC bug. // Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details. // See also traits::match. 
{ MatrixXf A = MatrixXf::Random(3,3); Ref R = 2.0*A; VERIFY_IS_APPROX(R, A+A); Ref R1 = MatrixXf::Random(3,3)+A; VectorXi V = VectorXi::Random(3); Ref R2 = VectorXi::LinSpaced(3,1,3)+V; VERIFY_IS_APPROX(R2, V+Vector3i(1,2,3)); VERIFY(( internal::has_nullary_operator >::value )); VERIFY(( !internal::has_unary_operator >::value )); VERIFY(( !internal::has_binary_operator >::value )); VERIFY(( internal::functor_has_linear_access >::ret )); VERIFY(( !internal::has_nullary_operator >::value )); VERIFY(( internal::has_unary_operator >::value )); VERIFY(( !internal::has_binary_operator >::value )); VERIFY(( internal::functor_has_linear_access >::ret )); } } EIGEN_DECLARE_TEST(nullary) { CALL_SUBTEST_1( testMatrixType(Matrix2d()) ); CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random(1,300),internal::random(1,300))) ); CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random(1,300),internal::random(1,300))) ); for(int i = 0; i < g_repeat*10; i++) { CALL_SUBTEST_3( testVectorType(VectorXcd(internal::random(1,30000))) ); CALL_SUBTEST_4( testVectorType(VectorXd(internal::random(1,30000))) ); CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232 CALL_SUBTEST_6( testVectorType(Vector3d()) ); CALL_SUBTEST_7( testVectorType(VectorXf(internal::random(1,30000))) ); CALL_SUBTEST_8( testVectorType(Vector3f()) ); CALL_SUBTEST_8( testVectorType(Vector4f()) ); CALL_SUBTEST_8( testVectorType(Matrix()) ); CALL_SUBTEST_8( testVectorType(Matrix()) ); CALL_SUBTEST_9( testVectorType(VectorXi(internal::random(1,10))) ); CALL_SUBTEST_9( testVectorType(VectorXi(internal::random(9,300))) ); CALL_SUBTEST_9( testVectorType(Matrix()) ); } CALL_SUBTEST_6( bug79<0>() ); CALL_SUBTEST_6( bug1630<0>() ); CALL_SUBTEST_9( nullary_overflow<0>() ); CALL_SUBTEST_10( nullary_internal_logic<0>() ); } ================================================ FILE: extensions/ngp_raymarch/include/op_include/eigen/test/num_dimensions.cpp ================================================ // This 
file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2018 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include "main.h" #include template void check_dim(const Xpr& ) { STATIC_CHECK( Xpr::NumDimensions == ExpectedDim ); } #if EIGEN_HAS_CXX11 template